From 58b2145a5c57f44572c9ed52925023dbe93aad72 Mon Sep 17 00:00:00 2001 From: Eric Lai Date: Thu, 17 Dec 2020 10:40:07 +0800 Subject: [PATCH 01/36] prepare_tensorlayer3.0 --- examples/README.md | 23 - examples/basic_tutorials/README.md | 13 - ...tic.py => tutorial_cifar10_cnn_dynamic.py} | 104 +- .../basic_tutorials/tutorial_mindspore.py | 117 + .../tutorial_mnist_mlp_dynamci_dragon.py | 100 + .../tutorial_mnist_mlp_dynamic.py | 65 +- .../tutorial_mnist_mlp_dynamic_2.py | 131 - .../tutorial_mnist_mlp_static.py | 89 - .../tutorial_mnist_mlp_static_2.py | 99 - .../basic_tutorials/tutorial_mnist_siamese.py | 142 - .../basic_tutorials/tutorial_mnist_simple.py | 110 +- .../tutorial_ms_cifar10_simple.py | 163 + .../tutorial_tensorlayer_mindspore.py | 92 + examples/data_process/README.md | 10 - examples/data_process/data/.DS_Store | Bin 6148 -> 0 bytes examples/data_process/data/__init__.py | 3 - examples/data_process/data/cat/img1.jpg | Bin 6435 -> 0 bytes examples/data_process/data/cat/img2.jpg | Bin 5487 -> 0 bytes examples/data_process/data/cat/img3.jpg | Bin 7478 -> 0 bytes examples/data_process/data/cat/img4.jpg | Bin 7066 -> 0 bytes examples/data_process/data/cat/img5.jpg | Bin 5041 -> 0 bytes examples/data_process/data/cat/img6.jpg | Bin 4870 -> 0 bytes examples/data_process/data/cat/img7.jpg | Bin 3193 -> 0 bytes examples/data_process/data/cat/img8.jpg | Bin 6950 -> 0 bytes examples/data_process/data/cat/img9.jpg | Bin 5381 -> 0 bytes examples/data_process/data/cat_caption.json | 19 - examples/data_process/data/dog/img1.jpg | Bin 4612 -> 0 bytes examples/data_process/data/dog/img2.jpg | Bin 3797 -> 0 bytes examples/data_process/data/dog/img3.jpg | Bin 4897 -> 0 bytes examples/data_process/data/dog/img4.jpg | Bin 4603 -> 0 bytes examples/data_process/data/dog/img5.jpg | Bin 6028 -> 0 bytes examples/data_process/data/dog/img6.jpg | Bin 3782 -> 0 bytes examples/data_process/data/dog/img7.jpg | Bin 5337 -> 0 bytes examples/data_process/data/dog/img8.jpg | Bin 5610 -> 0 bytes examples/data_process/data/dog/img9.jpg | Bin 3492 -> 0 bytes .../data_process/data/greenbackground/0.jpg | Bin 18006 -> 0 bytes .../data_process/data/greenbackground/1.jpg | Bin 30505 -> 0 bytes examples/data_process/data/tiger.jpeg | Bin 12270 -> 0 bytes .../tutorial_fast_affine_transform.py | 139 - .../data_process/tutorial_tf_dataset_voc.py | 113 - examples/data_process/tutorial_tfrecord.py | 98 - examples/data_process/tutorial_tfrecord2.py | 90 - examples/data_process/tutorial_tfrecord3.py | 464 - examples/database/README.md | 19 - examples/database/dispatch_tasks.py | 51 - examples/database/run_tasks.py | 19 - examples/database/task_script.py | 71 - .../tutorial_image_preprocess.py | 28 - ...torial_imagenet_inceptionV3_distributed.py | 452 - .../tutorial_mnist_distributed.py | 83 - .../tutorial_mnist_distributed.yml | 87 - examples/distributed_training/README.md | 1 - .../tutorial_cifar10_distributed_trainer.py | 124 - .../tutorial_mnist_distributed_trainer.py | 76 - examples/keras_tfslim/README.md | 1 - examples/keras_tfslim/tutorial_keras.py | 77 - examples/pretrained_cnn/README.md | 2 - examples/pretrained_cnn/data/__init__.py | 3 - .../data/imagenet_class_index.json | 1 - .../pretrained_cnn/data/imagenet_classes.py | 1000 -- examples/pretrained_cnn/data/laska.png | Bin 101910 -> 0 bytes examples/pretrained_cnn/data/puzzle.jpeg | Bin 8892 -> 0 bytes examples/pretrained_cnn/data/tiger.jpeg | Bin 12270 -> 0 bytes ...torial_load_ckpt_weights_to_tensorlayer.py | 70 - .../tutorial_models_mobilenetv1.py | 34 - 
.../tutorial_models_resnet50.py | 34 - .../tutorial_models_squeezenetv1.py | 30 - .../pretrained_cnn/tutorial_models_vgg16.py | 27 - .../pretrained_cnn/tutorial_models_vgg19.py | 27 - .../tutorial_models_vgg_static.py | 27 - examples/quantized_net/README.md | 6 - .../tutorial_binarynet_cifar10_tfrecord.py | 218 - .../tutorial_binarynet_mnist_cnn.py | 106 - .../tutorial_dorefanet_cifar10_tfrecord.py | 211 - .../tutorial_dorefanet_mnist_cnn.py | 101 - .../tutorial_quanconv_cifar10.py | 208 - .../quantized_net/tutorial_quanconv_mnist.py | 116 - ...tutorial_ternaryweight_cifar10_tfrecord.py | 221 - .../tutorial_ternaryweight_mnist_cnn.py | 102 - examples/reinforcement_learning/.gitignore | 2 - examples/reinforcement_learning/README.md | 364 - .../reinforcement_learning/tutorial_A3C.py | 323 - .../reinforcement_learning/tutorial_AC.py | 277 - .../reinforcement_learning/tutorial_C51.py | 343 - .../reinforcement_learning/tutorial_DDPG.py | 305 - .../reinforcement_learning/tutorial_DPPO.py | 378 - .../reinforcement_learning/tutorial_DQN.py | 182 - .../tutorial_DQN_variants.py | 433 - .../reinforcement_learning/tutorial_PG.py | 233 - .../reinforcement_learning/tutorial_PPO.py | 322 - .../tutorial_Qlearning.py | 113 - .../reinforcement_learning/tutorial_SAC.py | 453 - .../reinforcement_learning/tutorial_TD3.py | 436 - .../reinforcement_learning/tutorial_TRPO.py | 512 - .../tutorial_atari_pong.py | 147 - .../reinforcement_learning/tutorial_format.py | 98 - .../tutorial_prioritized_replay.py | 527 - .../tutorial_wrappers.py | 563 - .../spatial_transformer_network/README.md | 44 - ...ial_spatial_transformer_network_dynamic.py | 167 - ...rial_spatial_transformer_network_static.py | 164 - examples/text_classification/readme.md | 29 - .../tutorial_imdb_fasttext.py | 175 - examples/text_generation/README.md | 0 examples/text_generation/data/.DS_Store | Bin 6148 -> 0 bytes examples/text_generation/data/__init__.py | 5 - .../text_generation/data/trump/trump_text.txt | 7332 ---------- .../data/trump/trump_twitter.txt | 3434 ----- examples/text_generation/data/word_counts.txt | 11519 ---------------- .../text_generation/tutorial_generate_text.py | 332 - examples/text_ptb/README.md | 1 - examples/text_ptb/tutorial_ptb_lstm.py | 523 - .../tutorial_ptb_lstm_state_is_tuple.py | 618 - .../tutorial_word2vec_basic.py | 373 - .../text_word_embedding/word2vec_basic.pdf | Bin 113953 -> 0 bytes examples/tutorial_work_with_onnx.py | 343 - tensorlayer/__init__.py | 6 + tensorlayer/activation.py | 30 +- tensorlayer/backend/__init__.py | 5 + tensorlayer/backend/ops/__init__.py | 115 + tensorlayer/backend/ops/dragon_backend.py | 989 ++ tensorlayer/backend/ops/dragon_nn.py | 910 ++ tensorlayer/backend/ops/load_backend.py | 73 + tensorlayer/backend/ops/mindspore_backend.py | 1131 ++ tensorlayer/backend/ops/mindspore_nn.py | 1187 ++ tensorlayer/backend/ops/tensorflow_backend.py | 956 ++ tensorlayer/backend/ops/tensorflow_nn.py | 1299 ++ tensorlayer/cost/__init__.py | 13 + tensorlayer/cost/mindspore_cost.py | 763 + .../{cost.py => cost/tensorflow_cost.py} | 6 +- tensorlayer/dataflow/__init__.py | 5 + tensorlayer/dataflow/base.py | 18 + tensorlayer/dataflow/common.py | 34 + tensorlayer/dataflow/dataflow_examples.py | 56 + tensorlayer/dataflow/image/__init__.py | 2 + tensorlayer/dataflow/load_data_backend.py | 9 + tensorlayer/dataflow/mindspore_data.py | 44 + tensorlayer/dataflow/tensorflow_data.py | 44 + tensorlayer/decorators/__init__.py | 2 +- tensorlayer/files/__init__.py | 6 +- tensorlayer/files/utils.py | 157 +- 
tensorlayer/initializers.py | 53 +- tensorlayer/layers/__init__.py | 4 +- tensorlayer/layers/activation.py | 106 +- tensorlayer/layers/convolution/__init__.py | 64 +- tensorlayer/layers/convolution/binary_conv.py | 159 - .../layers/convolution/deformable_conv.py | 371 - .../layers/convolution/depthwise_conv.py | 52 +- tensorlayer/layers/convolution/dorefa_conv.py | 169 - tensorlayer/layers/convolution/expert_conv.py | 372 - .../layers/convolution/expert_deconv.py | 397 - tensorlayer/layers/convolution/group_conv.py | 157 - tensorlayer/layers/convolution/quan_conv.py | 170 - .../layers/convolution/quan_conv_bn.py | 234 - .../layers/convolution/separable_conv.py | 307 - .../layers/convolution/simplified_conv.py | 137 +- .../layers/convolution/simplified_deconv.py | 273 - .../layers/convolution/super_resolution.py | 202 - .../layers/convolution/ternary_conv.py | 162 - tensorlayer/layers/core.py | 730 - tensorlayer/layers/core/__init__.py | 8 + tensorlayer/layers/core/common.py | 34 + tensorlayer/layers/core/core_mindspore.py | 379 + .../layers/core/core_tensorflow_dragon.py | 666 + tensorlayer/layers/dense/__init__.py | 18 +- tensorlayer/layers/dense/base_dense.py | 39 +- tensorlayer/layers/dense/binary_dense.py | 106 - tensorlayer/layers/dense/dorefa_dense.py | 113 - tensorlayer/layers/dense/dropconnect.py | 20 +- tensorlayer/layers/dense/quan_dense.py | 14 +- tensorlayer/layers/dense/quan_dense_bn.py | 194 - tensorlayer/layers/dense/ternary_dense.py | 108 - tensorlayer/layers/dropout.py | 26 +- tensorlayer/layers/embedding.py | 106 +- tensorlayer/layers/extend.py | 18 +- tensorlayer/layers/image_resampling.py | 42 +- tensorlayer/layers/inputs.py | 63 +- tensorlayer/layers/lambda_layers.py | 283 - tensorlayer/layers/merge.py | 40 +- tensorlayer/layers/noise.py | 21 +- tensorlayer/layers/normalization.py | 706 +- tensorlayer/layers/padding.py | 25 +- tensorlayer/layers/pooling.py | 285 +- tensorlayer/layers/quantize.py | 13 +- tensorlayer/layers/recurrent.py | 1265 -- tensorlayer/layers/scale.py | 14 +- tensorlayer/layers/shape.py | 73 +- tensorlayer/layers/spatial_transformer.py | 143 +- tensorlayer/layers/stack.py | 18 +- tensorlayer/layers/utils.py | 29 +- tensorlayer/logging/__init__.py | 2 +- tensorlayer/logging/contrib/__init__.py | 2 +- tensorlayer/models/__init__.py | 12 +- tensorlayer/models/core.py | 1167 +- tensorlayer/models/imagenet_class_index.json | 1 - tensorlayer/models/imagenet_classes.py | 1003 -- tensorlayer/models/mobilenetv1.py | 118 - ...et50_weights_tf_dim_ordering_tf_kernels.h5 | Bin 0 -> 24576 bytes tensorlayer/models/resnet.py | 203 - tensorlayer/models/seq2seq.py | 163 - tensorlayer/models/seq2seq_with_attention.py | 210 - tensorlayer/models/squeezenetv1.py | 111 - tensorlayer/models/vgg.py | 366 - tensorlayer/optimizers/__init__.py | 15 +- tensorlayer/optimizers/dragon_optimizers.py | 56 + .../optimizers/load_optimizers_backend.py | 14 + tensorlayer/optimizers/mindspore_optimizer.py | 158 + .../optimizers/tensorflow_optimizer.py | 45 + tensorlayer/package_info.py | 2 +- tensorlayer/rein.py | 4 +- 210 files changed, 10810 insertions(+), 46577 deletions(-) delete mode 100644 examples/README.md delete mode 100644 examples/basic_tutorials/README.md rename examples/basic_tutorials/{tutorial_cifar10_cnn_static.py => tutorial_cifar10_cnn_dynamic.py} (64%) create mode 100644 examples/basic_tutorials/tutorial_mindspore.py create mode 100644 examples/basic_tutorials/tutorial_mnist_mlp_dynamci_dragon.py delete mode 100644 
examples/basic_tutorials/tutorial_mnist_mlp_dynamic_2.py delete mode 100644 examples/basic_tutorials/tutorial_mnist_mlp_static.py delete mode 100644 examples/basic_tutorials/tutorial_mnist_mlp_static_2.py delete mode 100644 examples/basic_tutorials/tutorial_mnist_siamese.py create mode 100644 examples/basic_tutorials/tutorial_ms_cifar10_simple.py create mode 100644 examples/basic_tutorials/tutorial_tensorlayer_mindspore.py delete mode 100644 examples/data_process/README.md delete mode 100644 examples/data_process/data/.DS_Store delete mode 100644 examples/data_process/data/__init__.py delete mode 100644 examples/data_process/data/cat/img1.jpg delete mode 100644 examples/data_process/data/cat/img2.jpg delete mode 100644 examples/data_process/data/cat/img3.jpg delete mode 100644 examples/data_process/data/cat/img4.jpg delete mode 100644 examples/data_process/data/cat/img5.jpg delete mode 100644 examples/data_process/data/cat/img6.jpg delete mode 100644 examples/data_process/data/cat/img7.jpg delete mode 100644 examples/data_process/data/cat/img8.jpg delete mode 100644 examples/data_process/data/cat/img9.jpg delete mode 100644 examples/data_process/data/cat_caption.json delete mode 100644 examples/data_process/data/dog/img1.jpg delete mode 100644 examples/data_process/data/dog/img2.jpg delete mode 100644 examples/data_process/data/dog/img3.jpg delete mode 100644 examples/data_process/data/dog/img4.jpg delete mode 100644 examples/data_process/data/dog/img5.jpg delete mode 100644 examples/data_process/data/dog/img6.jpg delete mode 100644 examples/data_process/data/dog/img7.jpg delete mode 100644 examples/data_process/data/dog/img8.jpg delete mode 100644 examples/data_process/data/dog/img9.jpg delete mode 100644 examples/data_process/data/greenbackground/0.jpg delete mode 100644 examples/data_process/data/greenbackground/1.jpg delete mode 100755 examples/data_process/data/tiger.jpeg delete mode 100644 examples/data_process/tutorial_fast_affine_transform.py delete mode 100644 examples/data_process/tutorial_tf_dataset_voc.py delete mode 100644 examples/data_process/tutorial_tfrecord.py delete mode 100755 examples/data_process/tutorial_tfrecord2.py delete mode 100644 examples/data_process/tutorial_tfrecord3.py delete mode 100644 examples/database/README.md delete mode 100644 examples/database/dispatch_tasks.py delete mode 100644 examples/database/run_tasks.py delete mode 100644 examples/database/task_script.py delete mode 100755 examples/deprecated_tutorials/tutorial_image_preprocess.py delete mode 100644 examples/deprecated_tutorials/tutorial_imagenet_inceptionV3_distributed.py delete mode 100644 examples/deprecated_tutorials/tutorial_mnist_distributed.py delete mode 100644 examples/deprecated_tutorials/tutorial_mnist_distributed.yml delete mode 100644 examples/distributed_training/README.md delete mode 100644 examples/distributed_training/tutorial_cifar10_distributed_trainer.py delete mode 100755 examples/distributed_training/tutorial_mnist_distributed_trainer.py delete mode 100644 examples/keras_tfslim/README.md delete mode 100644 examples/keras_tfslim/tutorial_keras.py delete mode 100644 examples/pretrained_cnn/README.md delete mode 100644 examples/pretrained_cnn/data/__init__.py delete mode 100644 examples/pretrained_cnn/data/imagenet_class_index.json delete mode 100644 examples/pretrained_cnn/data/imagenet_classes.py delete mode 100644 examples/pretrained_cnn/data/laska.png delete mode 100755 examples/pretrained_cnn/data/puzzle.jpeg delete mode 100755 examples/pretrained_cnn/data/tiger.jpeg 
delete mode 100644 examples/pretrained_cnn/tutorial_load_ckpt_weights_to_tensorlayer.py delete mode 100644 examples/pretrained_cnn/tutorial_models_mobilenetv1.py delete mode 100644 examples/pretrained_cnn/tutorial_models_resnet50.py delete mode 100644 examples/pretrained_cnn/tutorial_models_squeezenetv1.py delete mode 100644 examples/pretrained_cnn/tutorial_models_vgg16.py delete mode 100644 examples/pretrained_cnn/tutorial_models_vgg19.py delete mode 100644 examples/pretrained_cnn/tutorial_models_vgg_static.py delete mode 100644 examples/quantized_net/README.md delete mode 100644 examples/quantized_net/tutorial_binarynet_cifar10_tfrecord.py delete mode 100644 examples/quantized_net/tutorial_binarynet_mnist_cnn.py delete mode 100644 examples/quantized_net/tutorial_dorefanet_cifar10_tfrecord.py delete mode 100644 examples/quantized_net/tutorial_dorefanet_mnist_cnn.py delete mode 100644 examples/quantized_net/tutorial_quanconv_cifar10.py delete mode 100644 examples/quantized_net/tutorial_quanconv_mnist.py delete mode 100644 examples/quantized_net/tutorial_ternaryweight_cifar10_tfrecord.py delete mode 100644 examples/quantized_net/tutorial_ternaryweight_mnist_cnn.py delete mode 100644 examples/reinforcement_learning/.gitignore delete mode 100644 examples/reinforcement_learning/README.md delete mode 100644 examples/reinforcement_learning/tutorial_A3C.py delete mode 100644 examples/reinforcement_learning/tutorial_AC.py delete mode 100644 examples/reinforcement_learning/tutorial_C51.py delete mode 100644 examples/reinforcement_learning/tutorial_DDPG.py delete mode 100644 examples/reinforcement_learning/tutorial_DPPO.py delete mode 100644 examples/reinforcement_learning/tutorial_DQN.py delete mode 100644 examples/reinforcement_learning/tutorial_DQN_variants.py delete mode 100644 examples/reinforcement_learning/tutorial_PG.py delete mode 100644 examples/reinforcement_learning/tutorial_PPO.py delete mode 100644 examples/reinforcement_learning/tutorial_Qlearning.py delete mode 100644 examples/reinforcement_learning/tutorial_SAC.py delete mode 100644 examples/reinforcement_learning/tutorial_TD3.py delete mode 100644 examples/reinforcement_learning/tutorial_TRPO.py delete mode 100644 examples/reinforcement_learning/tutorial_atari_pong.py delete mode 100644 examples/reinforcement_learning/tutorial_format.py delete mode 100644 examples/reinforcement_learning/tutorial_prioritized_replay.py delete mode 100644 examples/reinforcement_learning/tutorial_wrappers.py delete mode 100644 examples/spatial_transformer_network/README.md delete mode 100644 examples/spatial_transformer_network/tutorial_spatial_transformer_network_dynamic.py delete mode 100644 examples/spatial_transformer_network/tutorial_spatial_transformer_network_static.py delete mode 100644 examples/text_classification/readme.md delete mode 100644 examples/text_classification/tutorial_imdb_fasttext.py delete mode 100644 examples/text_generation/README.md delete mode 100644 examples/text_generation/data/.DS_Store delete mode 100644 examples/text_generation/data/__init__.py delete mode 100644 examples/text_generation/data/trump/trump_text.txt delete mode 100644 examples/text_generation/data/trump/trump_twitter.txt delete mode 100755 examples/text_generation/data/word_counts.txt delete mode 100644 examples/text_generation/tutorial_generate_text.py delete mode 100644 examples/text_ptb/README.md delete mode 100644 examples/text_ptb/tutorial_ptb_lstm.py delete mode 100644 examples/text_ptb/tutorial_ptb_lstm_state_is_tuple.py delete mode 100644 
examples/text_word_embedding/tutorial_word2vec_basic.py delete mode 100644 examples/text_word_embedding/word2vec_basic.pdf delete mode 100644 examples/tutorial_work_with_onnx.py create mode 100644 tensorlayer/backend/__init__.py create mode 100644 tensorlayer/backend/ops/__init__.py create mode 100644 tensorlayer/backend/ops/dragon_backend.py create mode 100644 tensorlayer/backend/ops/dragon_nn.py create mode 100644 tensorlayer/backend/ops/load_backend.py create mode 100644 tensorlayer/backend/ops/mindspore_backend.py create mode 100644 tensorlayer/backend/ops/mindspore_nn.py create mode 100644 tensorlayer/backend/ops/tensorflow_backend.py create mode 100644 tensorlayer/backend/ops/tensorflow_nn.py create mode 100644 tensorlayer/cost/__init__.py create mode 100644 tensorlayer/cost/mindspore_cost.py rename tensorlayer/{cost.py => cost/tensorflow_cost.py} (99%) create mode 100644 tensorlayer/dataflow/__init__.py create mode 100644 tensorlayer/dataflow/base.py create mode 100644 tensorlayer/dataflow/common.py create mode 100644 tensorlayer/dataflow/dataflow_examples.py create mode 100644 tensorlayer/dataflow/image/__init__.py create mode 100644 tensorlayer/dataflow/load_data_backend.py create mode 100644 tensorlayer/dataflow/mindspore_data.py create mode 100644 tensorlayer/dataflow/tensorflow_data.py delete mode 100644 tensorlayer/layers/convolution/binary_conv.py delete mode 100644 tensorlayer/layers/convolution/deformable_conv.py delete mode 100644 tensorlayer/layers/convolution/dorefa_conv.py delete mode 100644 tensorlayer/layers/convolution/expert_conv.py delete mode 100644 tensorlayer/layers/convolution/expert_deconv.py delete mode 100644 tensorlayer/layers/convolution/group_conv.py delete mode 100644 tensorlayer/layers/convolution/quan_conv.py delete mode 100644 tensorlayer/layers/convolution/quan_conv_bn.py delete mode 100644 tensorlayer/layers/convolution/separable_conv.py delete mode 100644 tensorlayer/layers/convolution/simplified_deconv.py delete mode 100644 tensorlayer/layers/convolution/super_resolution.py delete mode 100644 tensorlayer/layers/convolution/ternary_conv.py delete mode 100644 tensorlayer/layers/core.py create mode 100644 tensorlayer/layers/core/__init__.py create mode 100644 tensorlayer/layers/core/common.py create mode 100644 tensorlayer/layers/core/core_mindspore.py create mode 100644 tensorlayer/layers/core/core_tensorflow_dragon.py delete mode 100644 tensorlayer/layers/dense/binary_dense.py delete mode 100644 tensorlayer/layers/dense/dorefa_dense.py delete mode 100644 tensorlayer/layers/dense/quan_dense_bn.py delete mode 100644 tensorlayer/layers/dense/ternary_dense.py delete mode 100644 tensorlayer/layers/lambda_layers.py delete mode 100644 tensorlayer/layers/recurrent.py delete mode 100644 tensorlayer/models/imagenet_class_index.json delete mode 100644 tensorlayer/models/imagenet_classes.py delete mode 100644 tensorlayer/models/mobilenetv1.py create mode 100644 tensorlayer/models/models/resnet50_weights_tf_dim_ordering_tf_kernels.h5 delete mode 100644 tensorlayer/models/resnet.py delete mode 100644 tensorlayer/models/seq2seq.py delete mode 100644 tensorlayer/models/seq2seq_with_attention.py delete mode 100644 tensorlayer/models/squeezenetv1.py delete mode 100644 tensorlayer/models/vgg.py create mode 100644 tensorlayer/optimizers/dragon_optimizers.py create mode 100644 tensorlayer/optimizers/load_optimizers_backend.py create mode 100644 tensorlayer/optimizers/mindspore_optimizer.py create mode 100644 tensorlayer/optimizers/tensorflow_optimizer.py diff --git 
a/examples/README.md b/examples/README.md deleted file mode 100644 index 1d96c6c4a..000000000 --- a/examples/README.md +++ /dev/null @@ -1,23 +0,0 @@ -
- - -
- -
-
- -
- -This page contains basic tutorials and examples that help you to learn TensorLayer quick, but for real-world applications, such as Chatbot, Super-Resolution, Pose Estimation, please check [Awesome-TensorLayer](https://github.com/tensorlayer/awesome-tensorlayer) and [Home-TensorLayer](https://github.com/tensorlayer) - -- [Basic tutorials](https://tensorlayer.readthedocs.io/en/latest/user/get_start_model.html) -- [Basic examples](https://github.com/tensorlayer/tensorlayer/tree/master/examples/basic_tutorials) -- [Using pre-trained CNNs](https://github.com/tensorlayer/tensorlayer/tree/master/examples/pretrained_cnn) -- [Quantized networks](https://github.com/tensorlayer/tensorlayer/tree/master/examples/quantized_net) -- [Reinforcement learning](https://github.com/tensorlayer/tensorlayer/tree/master/examples/reinforcement_learning) -- [Spatial transformer](https://github.com/tensorlayer/tensorlayer/tree/master/examples/spatial_transformer_network) -- [Text/sentence classification](https://github.com/tensorlayer/tensorlayer/tree/master/examples/text_classification) -- [Text/sentence generation](https://github.com/tensorlayer/tensorlayer/tree/master/examples/text_generation) -- [Language modeling](https://github.com/tensorlayer/tensorlayer/tree/master/examples/text_ptb) -- [Word embedding](https://github.com/tensorlayer/tensorlayer/tree/master/examples/text_word_embedding) -- [Many more ...](https://github.com/tensorlayer) diff --git a/examples/basic_tutorials/README.md b/examples/basic_tutorials/README.md deleted file mode 100644 index 222df955a..000000000 --- a/examples/basic_tutorials/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Before You Start - -TensorLayer has two types of models. -Static model allows you to build model in a fluent way while dynamic model allows you to fully control the forward process. -Please read this [DOCS](https://tensorlayer.readthedocs.io/en/latest/user/get_start_model.html#) before you start. 
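(A minimal sketch of the two styles, written against the TensorLayer 2.x API that these tutorials use; the layer sizes here are illustrative, not prescriptive:)

```python
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import Dense, Input
from tensorlayer.models import Model

# Static model: declare the graph fluently, then wrap inputs/outputs in a Model.
ni = Input([None, 784])
nn = Dense(n_units=800, act=tf.nn.relu)(ni)
nn = Dense(n_units=10, act=None)(nn)
static_mlp = Model(inputs=ni, outputs=nn, name="mlp")


# Dynamic model: subclass Model and write the forward pass yourself.
class DynamicMLP(Model):

    def __init__(self):
        super(DynamicMLP, self).__init__()
        self.dense1 = Dense(n_units=800, act=tf.nn.relu, in_channels=784)
        self.dense2 = Dense(n_units=10, act=None, in_channels=800)

    def forward(self, x):
        return self.dense2(self.dense1(x))
```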
- -- [MNIST Simplest Example](https://github.com/tensorlayer/tensorlayer/blob/master/examples/basic_tutorials/tutorial_mnist_simple.py) -- [MNIST Static Example](https://github.com/tensorlayer/tensorlayer/blob/master/examples/basic_tutorials/tutorial_mnist_mlp_static.py) -- [MNIST Static Example for Reused Model](https://github.com/tensorlayer/tensorlayer/blob/master/examples/basic_tutorials/tutorial_mnist_mlp_static_2.py) -- [MNIST Dynamic Example](https://github.com/tensorlayer/tensorlayer/blob/master/examples/basic_tutorials/tutorial_mnist_mlp_dynamic.py) -- [MNIST Dynamic Example for Seperated Models](https://github.com/tensorlayer/tensorlayer/blob/master/examples/basic_tutorials/tutorial_mnist_mlp_dynamic_2.py) -- [MNIST Static Siamese Model Example](https://github.com/tensorlayer/tensorlayer/blob/master/examples/basic_tutorials/tutorial_mnist_siamese.py) -- [CIFAR10 Static Example with Data Augmentation](https://github.com/tensorlayer/tensorlayer/blob/master/examples/basic_tutorials/tutorial_cifar10_cnn_static.py) diff --git a/examples/basic_tutorials/tutorial_cifar10_cnn_static.py b/examples/basic_tutorials/tutorial_cifar10_cnn_dynamic.py similarity index 64% rename from examples/basic_tutorials/tutorial_cifar10_cnn_static.py rename to examples/basic_tutorials/tutorial_cifar10_cnn_dynamic.py index ee6af3d0b..f399bef22 100644 --- a/examples/basic_tutorials/tutorial_cifar10_cnn_static.py +++ b/examples/basic_tutorials/tutorial_cifar10_cnn_dynamic.py @@ -1,15 +1,14 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -import multiprocessing import time - import numpy as np +import multiprocessing import tensorflow as tf +from tensorlayer.layers import Module import tensorlayer as tl -from tensorlayer.layers import (BatchNorm, Conv2d, Dense, Flatten, Input, LocalResponseNorm, MaxPool2d) -from tensorlayer.models import Model +from tensorlayer.layers import (Conv2d, Dense, Flatten, MaxPool2d, BatchNorm2d) # enable debug logging tl.logging.set_verbosity(tl.logging.DEBUG) @@ -19,63 +18,48 @@ X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) -# define the network -def get_model(inputs_shape): - # self defined initialization - W_init = tl.initializers.truncated_normal(stddev=5e-2) - W_init2 = tl.initializers.truncated_normal(stddev=0.04) - b_init2 = tl.initializers.constant(value=0.1) - - # build network - ni = Input(inputs_shape) - nn = Conv2d(64, (5, 5), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv1')(ni) - nn = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(nn) - nn = LocalResponseNorm(depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name="norm1")(nn) - - nn = Conv2d(64, (5, 5), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv2')(nn) - nn = LocalResponseNorm(depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name="norm2")(nn) - nn = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(nn) +class CNN(Module): - nn = Flatten(name='flatten')(nn) - nn = Dense(384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='dense1relu')(nn) - nn = Dense(192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='dense2relu')(nn) - nn = Dense(10, act=None, W_init=W_init2, name='output')(nn) + def __init__(self): + super(CNN, self).__init__() + # weights init + W_init = tl.initializers.truncated_normal(stddev=5e-2) + W_init2 = tl.initializers.truncated_normal(stddev=0.04) + b_init2 = tl.initializers.constant(value=0.1) - M = Model(inputs=ni, outputs=nn, name='cnn') - 
return M + self.conv1 = Conv2d(64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv1', in_channels=3) + self.bn = BatchNorm2d(num_features=64, act=tl.ReLU) + self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1') + self.conv2 = Conv2d( + 64, (5, 5), (1, 1), padding='SAME', act=tl.ReLU, W_init=W_init, b_init=None, name='conv2', in_channels=64 + ) + self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2') -def get_model_batchnorm(inputs_shape): - # self defined initialization - W_init = tl.initializers.truncated_normal(stddev=5e-2) - W_init2 = tl.initializers.truncated_normal(stddev=0.04) - b_init2 = tl.initializers.constant(value=0.1) + self.flatten = Flatten(name='flatten') + self.dense1 = Dense(384, act=tl.ReLU, W_init=W_init2, b_init=b_init2, name='dense1relu', in_channels=2304) + self.dense2 = Dense(192, act=tl.ReLU, W_init=W_init2, b_init=b_init2, name='dense2relu', in_channels=384) + self.dense3 = Dense(10, act=None, W_init=W_init2, name='output', in_channels=192) - # build network - ni = Input(inputs_shape) - nn = Conv2d(64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv1')(ni) - nn = BatchNorm(decay=0.99, act=tf.nn.relu, name='batch1')(nn) - nn = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(nn) - - nn = Conv2d(64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv2')(nn) - nn = BatchNorm(decay=0.99, act=tf.nn.relu, name='batch2')(nn) - nn = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(nn) - - nn = Flatten(name='flatten')(nn) - nn = Dense(384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='dense1relu')(nn) - nn = Dense(192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='dense2relu')(nn) - nn = Dense(10, act=None, W_init=W_init2, name='output')(nn) - - M = Model(inputs=ni, outputs=nn, name='cnn') - return M + def forward(self, x): + z = self.conv1(x) + z = self.bn(z) + z = self.maxpool1(z) + z = self.conv2(z) + z = self.maxpool2(z) + z = self.flatten(z) + z = self.dense1(z) + z = self.dense2(z) + z = self.dense3(z) + return z # get the network -net = get_model([None, 24, 24, 3]) +net = CNN() # training settings batch_size = 128 -n_epoch = 50000 +n_epoch = 500 learning_rate = 0.0001 print_freq = 5 n_step_epoch = int(len(y_train) / batch_size) @@ -83,7 +67,7 @@ def get_model_batchnorm(inputs_shape): shuffle_buffer_size = 128 train_weights = net.trainable_weights -optimizer = tf.optimizers.Adam(learning_rate) +optimizer = tl.optimizers.Adam(learning_rate) # looking for decay learning rate? 
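# A hedged sketch of one option, assuming tl.optimizers.Adam accepts a schedule
# object the way tf.optimizers.Adam does (unverified for the new backend layer):
#   lr_schedule = tf.optimizers.schedules.ExponentialDecay(
#       initial_learning_rate=0.0001, decay_steps=10000, decay_rate=0.96)
#   optimizer = tl.optimizers.Adam(lr_schedule)
# For a complete script with learning-rate decay,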
see https://github.com/tensorlayer/srgan/blob/master/train.py @@ -155,25 +139,31 @@ def _map_fn_test(img, target): for epoch in range(n_epoch): start_time = time.time() + train_loss, train_acc, n_iter = 0, 0, 0 for X_batch, y_batch in train_ds: - net.train() + net.set_train() + with tf.GradientTape() as tape: # compute outputs _logits = net(X_batch) # compute loss and update model - _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') - grad = tape.gradient(_loss, train_weights) + _loss_ce = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') + + grad = tape.gradient(_loss_ce, train_weights) optimizer.apply_gradients(zip(grad, train_weights)) - train_loss += _loss + + train_loss += _loss_ce train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 - # use training and evaluation sets to evaluate the model every print_freq epoch - if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) print(" train loss: {}".format(train_loss / n_iter)) print(" train acc: {}".format(train_acc / n_iter)) + + # use training and evaluation sets to evaluate the model every print_freq epoch + if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: + net.eval() val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_ds: diff --git a/examples/basic_tutorials/tutorial_mindspore.py b/examples/basic_tutorials/tutorial_mindspore.py new file mode 100644 index 000000000..30facdbcb --- /dev/null +++ b/examples/basic_tutorials/tutorial_mindspore.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import numpy as np +import mindspore.nn as nn +import mindspore.ops.operations as P +from mindspore.ops import composite as C +from mindspore.common import dtype as mstype +from mindspore import context, Tensor, ParameterTuple +from mindspore.common.initializer import TruncatedNormal +from mindspore.nn import Dense, WithLossCell, SoftmaxCrossEntropyWithLogits, Momentum +import tensorlayer as tl +import mindspore as ms +import tensorflow as tf +import time + +context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU") + + +def fc_with_initialize(input_channels, out_channels): + """weight initial for fc layer""" + weight = weight_variable() + bias = weight_variable() + return nn.Dense(input_channels, out_channels, weight, bias) + + +def weight_variable(): + """weight initial""" + return TruncatedNormal(0.02) + + +class LeNet5(nn.Cell): + """ + Lenet network + Args: + num_class (int): Num classes. Default: 10. 
+ + Returns: + Tensor, output tensor + + Examples: + >>> LeNet5(num_class=10) + """ + + def __init__(self, num_class=10): + super(LeNet5, self).__init__() + self.num_class = num_class + self.fc1 = fc_with_initialize(784, 800) + self.fc2 = fc_with_initialize(800, 800) + self.fc3 = fc_with_initialize(800, self.num_class) + self.relu = nn.ReLU() + + def construct(self, x): + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.relu(x) + x = self.fc3(x) + return x + + +class GradWrap(nn.Cell): + """ GradWrap definition """ + + def __init__(self, network): + super(GradWrap, self).__init__(auto_prefix=False) + self.network = network + self.weights = ParameterTuple(filter(lambda x: x.requires_grad, network.get_parameters())) + + def construct(self, x, label): + weights = self.weights + return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label) + + +def generator_train(): + inputs = X_train + targets = y_train + if len(inputs) != len(targets): + raise AssertionError("The length of inputs and targets should be equal") + for _input, _target in zip(inputs, targets): + yield _input, _target + + +net = LeNet5() +optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9) +criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) +net_with_criterion = WithLossCell(net, criterion) +train_network = GradWrap(net_with_criterion) +train_network.set_train() + +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) +train_ds = tf.data.Dataset.from_generator(generator_train, output_types=(tf.float32, tf.int32)) +shuffle_buffer_size = 128 +batch_size = 128 +train_ds = train_ds.shuffle(shuffle_buffer_size) +train_ds = train_ds.batch(batch_size) +n_epoch = 50 + +for epoch in range(n_epoch): + start_time = time.time() + train_network.set_train() + train_loss, train_acc, n_iter = 0, 0, 0 + for X_batch, y_batch in train_ds: + X_batch = ms.Tensor(X_batch.numpy(), dtype=ms.float32) + y_batch = ms.Tensor(y_batch.numpy(), dtype=ms.int32) + output = net(X_batch) + loss_output = criterion(output, y_batch) + grads = train_network(X_batch, y_batch) + success = optimizer(grads) + loss = loss_output.asnumpy() + train_loss += loss + n_iter += 1 + # train_acc += np.mean((P.Equal()(P.Argmax(axis=1)(output), y_batch).asnumpy())) + print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) + print(" train loss: {}".format(train_loss / n_iter)) + # print(" train acc: {}".format(train_acc / n_iter)) + print(" train weights ", train_network.trainable_params()[0].data) diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_dynamci_dragon.py b/examples/basic_tutorials/tutorial_mnist_mlp_dynamci_dragon.py new file mode 100644 index 000000000..9c06ec54c --- /dev/null +++ b/examples/basic_tutorials/tutorial_mnist_mlp_dynamci_dragon.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import os +os.environ['TL_BACKEND'] = 'dragon' + +from tensorlayer.layers import Module +from tensorlayer.layers import Dense +import tensorlayer as tl +import dragon as dg +import time +import argparse +import numpy as np + +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) + + +class CustomModel(Module): + + def __init__(self): + super(CustomModel, self).__init__() + self.dense1 = Dense(n_units=800, act=tl.ReLU, in_channels=784) + self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800) + self.dense3 = Dense(n_units=10, act=tl.ReLU,
in_channels=800) + + def forward(self, x, foo=None): + z = self.dense1(x) + z = self.dense2(z) + out = self.dense3(z) + return out + + +def parse_args(): + """Parse the arguments.""" + parser = argparse.ArgumentParser(description='Train a cifar10 resnet') + parser.add_argument('--execution', default='EAGER_MODE', type=str, help='The execution mode') + parser.add_argument('--seed', default=1337, type=int, help='The random seed') + parser.add_argument('--cuda', default=-1, type=int, help='The cuda device to use') + return parser.parse_args() + + +class Classifier(object): + """The base classifier class.""" + + # TensorSpec for graph execution + image_spec = dg.Tensor([None, 3, 32, 32], 'float32') + label_spec = dg.Tensor([None], 'int64') + + def __init__(self, optimizer): + super(Classifier, self).__init__() + self.net = CustomModel() + self.optimizer = optimizer + self.params = self.net.trainable_weights + + def step(self, image, label): + with dg.GradientTape() as tape: + logit = self.net(image) + label = dg.cast(label, 'int64') + # take the loss on the raw logits: argmax is not differentiable, so the + # hard prediction is only extracted afterwards, for the accuracy metric + loss = dg.losses.sparse_softmax_cross_entropy([logit, label]) + pred = dg.cast(dg.math.argmax(logit, -1), 'int64') + accuracy = dg.math.mean(dg.math.equal([pred, label]).astype('float32')) + grads = tape.gradient(loss, self.params) + self.optimizer.apply_gradients(zip(self.params, grads)) + return loss, accuracy, self.optimizer + + +if __name__ == '__main__': + args = parse_args() + dg.logging.info('Called with args:\n' + str(args)) + + np.random.seed(args.seed) + dg.autograph.set_execution(args.execution) + dg.cuda.set_default_device(args.cuda) + + # Define the model + model = Classifier(dg.optimizers.SGD(base_lr=0.01, momentum=0.9, weight_decay=1e-4)) + + # Compile for graph execution if necessary + if args.execution == 'GRAPH_MODE': + model.step = dg.function( + func=model.step, + input_signature=[model.image_spec, model.label_spec], + ) + + # Main loop + batch_size = 200 + for i in range(50): + for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True): + image = dg.EagerTensor(X_batch, copy=False) + label = dg.EagerTensor(y_batch, copy=False, dtype='int64') + loss, accuracy, _ = model.step(image, label) + if i % 20 == 0: + dg.logging.info( + 'Iteration %d, lr = %s, loss = %.5f, accuracy = %.3f' % + (i, str(model.optimizer.base_lr), loss, accuracy) + ) diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_dynamic.py b/examples/basic_tutorials/tutorial_mnist_mlp_dynamic.py index d986b01a3..50186c009 100644 --- a/examples/basic_tutorials/tutorial_mnist_mlp_dynamic.py +++ b/examples/basic_tutorials/tutorial_mnist_mlp_dynamic.py @@ -1,61 +1,57 @@ -import time +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- import numpy as np -import tensorflow as tf +import time +import tensorflow as tf import tensorlayer as tl -from tensorlayer.layers import Dense, Dropout, Input -from tensorlayer.models import Model - -## enable debug logging -tl.logging.set_verbosity(tl.logging.DEBUG) +from tensorlayer.layers import Module +from tensorlayer.layers import Dense, Dropout, BatchNorm1d -## prepare MNIST data X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) -## define the network -class CustomModel(Model): +class CustomModel(Module): def __init__(self): super(CustomModel, self).__init__() - self.dropout1 =
Dropout(keep=0.8) #(self.innet) - self.dense1 = Dense(n_units=800, act=tf.nn.relu, in_channels=784) #(self.dropout1) - self.dropout2 = Dropout(keep=0.8) #(self.dense1) - self.dense2 = Dense(n_units=800, act=tf.nn.relu, in_channels=800) #(self.dropout2) - self.dropout3 = Dropout(keep=0.8) #(self.dense2) - self.dense3 = Dense(n_units=10, act=tf.nn.relu, in_channels=800) #(self.dropout3) + self.dropout1 = Dropout(keep=0.8) + self.dense1 = Dense(n_units=800, in_channels=784) + self.batchnorm = BatchNorm1d(act=tl.ReLU, num_features=800) + self.dropout2 = Dropout(keep=0.8) + self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800) + self.dropout3 = Dropout(keep=0.8) + self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800) def forward(self, x, foo=None): z = self.dropout1(x) z = self.dense1(z) + z = self.batchnorm(z) z = self.dropout2(z) z = self.dense2(z) z = self.dropout3(z) out = self.dense3(z) if foo is not None: - out = tf.nn.relu(out) + out = tl.ops.relu(out) return out MLP = CustomModel() - -## start training -n_epoch = 500 +n_epoch = 50 batch_size = 500 print_freq = 5 train_weights = MLP.trainable_weights -optimizer = tf.optimizers.Adam(learning_rate=0.0001) +optimizer = tl.optimizers.Adam(lr=0.0001) -## the following code can help you understand SGD deeply for epoch in range(n_epoch): ## iterate the dataset n_epoch times start_time = time.time() ## iterate over the entire training set once (shuffle the data via training) for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True): - MLP.train() # enable dropout + MLP.set_train() # enable dropout with tf.GradientTape() as tape: ## compute outputs - _logits = MLP(X_batch, foo=1) + _logits = MLP(X_batch) ## compute loss and update model _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') grad = tape.gradient(_loss, train_weights) @@ -63,32 +59,25 @@ def forward(self, x, foo=None): ## use training and evaluation sets to evaluate the model every print_freq epoch if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - MLP.eval() # disable dropout + MLP.eval() # disable dropout print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) train_loss, train_acc, n_iter = 0, 0, 0 for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False): - _logits = MLP(X_batch, foo=1) + _logits = MLP(X_batch) train_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 - print(" train foo=1 loss: {}".format(train_loss / n_iter)) - print(" train foo=1 acc: {}".format(train_acc / n_iter)) - val_loss, val_acc, n_iter = 0, 0, 0 - for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False): - _logits = MLP(X_batch, foo=1) # is_train=False, disable dropout - val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') - val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_iter += 1 - print(" val foo=1 loss: {}".format(val_loss / n_iter)) - print(" val foo=1 acc: {}".format(val_acc / n_iter)) + print(" train loss: {}".format(train_loss / n_iter)) + print(" train acc: {}".format(train_acc / n_iter)) + val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False): _logits = MLP(X_batch) # is_train=False, disable dropout val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 - print(" val
foo=0 loss: {}".format(val_loss / n_iter)) - print(" val foo=0 acc: {}".format(val_acc / n_iter)) + print(" val loss: {}".format(val_loss / n_iter)) + print(" val acc: {}".format(val_acc / n_iter)) ## use testing data to evaluate the model MLP.eval() diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_dynamic_2.py b/examples/basic_tutorials/tutorial_mnist_mlp_dynamic_2.py deleted file mode 100644 index 58695c8ac..000000000 --- a/examples/basic_tutorials/tutorial_mnist_mlp_dynamic_2.py +++ /dev/null @@ -1,131 +0,0 @@ -import time - -import numpy as np -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer.layers import Dense, Dropout, Input, LayerList -from tensorlayer.models import Model - -## enable debug logging -tl.logging.set_verbosity(tl.logging.DEBUG) - -## prepare MNIST data -X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) - - -## define the network -class CustomModelHidden(Model): - - def __init__(self): - super(CustomModelHidden, self).__init__() - self.dropout1 = Dropout(keep=0.8) #(self.innet) - self.seq = LayerList( - [ - Dense(n_units=800, act=tf.nn.relu, in_channels=784), - Dropout(keep=0.8), - Dense(n_units=800, act=tf.nn.relu, in_channels=800), - ] - ) - self.dropout3 = Dropout(keep=0.8) #(self.seq) - - def forward(self, x): - z = self.dropout1(x) - z = self.seq(z) - z = self.dropout3(z) - return z - - -class CustomModelOut(Model): - - def __init__(self): - super(CustomModelOut, self).__init__() - self.dense3 = Dense(n_units=10, act=tf.nn.relu, in_channels=800) - - def forward(self, x, foo=None): - out = self.dense3(x) - if foo is not None: - out = tf.nn.relu(out) - return out - - -# NOTE: using previous defined model is different in dynamic network -# a dynamic network cannot be converted into Layer because the inputs and outputs are unknown until forwarding -# therefore, you may reuse a previous defined model in the following way - -MLP1 = CustomModelHidden() -MLP2 = CustomModelOut() -# MLP.print_layers() -# MLP.print_weights() -# print(MLP) - -## start training -n_epoch = 500 -batch_size = 500 -print_freq = 5 -train_weights = MLP1.trainable_weights + MLP2.trainable_weights -optimizer = tf.optimizers.Adam(learning_rate=0.0001) - -## the following code can help you understand SGD deeply -for epoch in range(n_epoch): ## iterate the dataset n_epoch times - start_time = time.time() - ## iterate over the entire training set once (shuffle the data via training) - for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True): - MLP1.train() # enable dropout - MLP2.train() - with tf.GradientTape() as tape: - ## compute outputs - _hidden = MLP1(X_batch) - _logits = MLP2(_hidden, foo=1) - ## compute loss and update model - _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') - grad = tape.gradient(_loss, train_weights) - optimizer.apply_gradients(zip(grad, train_weights)) - - ## use training and evaluation sets to evaluate the model every print_freq epoch - if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - MLP1.eval() # disable dropout - MLP2.eval() - print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) - train_loss, train_acc, n_iter = 0, 0, 0 - for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False): - _hidden = MLP1(X_batch) - _logits = MLP2(_hidden, foo=1) - train_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') - train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - 
n_iter += 1 - print(" train foo=1 loss: {}".format(train_loss / n_iter)) - print(" train foo=1 acc: {}".format(train_acc / n_iter)) - - val_loss, val_acc, n_iter = 0, 0, 0 - for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False): - _hidden = MLP1(X_batch) - _logits = MLP2(_hidden, foo=1) - val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') - val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_iter += 1 - print(" val foo=1 loss: {}".format(val_loss / n_iter)) - print(" val foo=1 acc: {}".format(val_acc / n_iter)) - - val_loss, val_acc, n_iter = 0, 0, 0 - for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False): - _hidden = MLP1(X_batch) - _logits = MLP2(_hidden, foo=0) - val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') - val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_iter += 1 - print(" val foo=0 loss: {}".format(val_loss / n_iter)) - print(" val foo=0 acc: {}".format(val_acc / n_iter)) - -## use testing data to evaluate the model -MLP1.eval() -MLP2.eval() -test_loss, test_acc, n_iter = 0, 0, 0 -for X_batch, y_batch in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False): - _hidden = MLP1(X_batch) - _logits = MLP2(_hidden, foo=0) - test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') - test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_iter += 1 -print(" test foo=1 loss: {}".format(val_loss / n_iter)) -print(" test foo=1 acc: {}".format(val_acc / n_iter)) diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_static.py b/examples/basic_tutorials/tutorial_mnist_mlp_static.py deleted file mode 100644 index 358a0e561..000000000 --- a/examples/basic_tutorials/tutorial_mnist_mlp_static.py +++ /dev/null @@ -1,89 +0,0 @@ -import pprint -import time - -import numpy as np -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer.layers import Dense, Dropout, Input -from tensorlayer.models import Model - -## enable debug logging -tl.logging.set_verbosity(tl.logging.DEBUG) - -## prepare MNIST data -X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) - - -## define the network -# the softmax is implemented internally in tl.cost.cross_entropy(y, y_) to -# speed up computation, so we use identity here. 
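# Spelled out (a sketch; the variable names are illustrative): the network emits
# raw logits and the loss fuses log-softmax and negative log-likelihood in one
# numerically stable op, so no softmax layer is ever materialised:
#   logits = network(x)                        # raw scores, identity output
#   loss = tl.cost.cross_entropy(logits, y)    # == -log(softmax(logits)[y])
# For the underlying TensorFlow primitive,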
-# see tf.nn.sparse_softmax_cross_entropy_with_logits() -def get_model(inputs_shape): - ni = Input(inputs_shape) - nn = Dropout(keep=0.8)(ni) - nn = Dense(n_units=800, act=tf.nn.relu)(nn) - nn = Dropout(keep=0.8)(nn) - nn = Dense(n_units=800, act=tf.nn.relu)(nn) - nn = Dropout(keep=0.8)(nn) - nn = Dense(n_units=10, act=tf.nn.relu)(nn) - M = Model(inputs=ni, outputs=nn, name="mlp") - return M - - -MLP = get_model([None, 784]) -pprint.pprint(MLP.config) - -## start training -n_epoch = 500 -batch_size = 500 -print_freq = 5 -train_weights = MLP.trainable_weights -optimizer = tf.optimizers.Adam(lr=0.0001) - -## the following code can help you understand SGD deeply -for epoch in range(n_epoch): ## iterate the dataset n_epoch times - start_time = time.time() - ## iterate over the entire training set once (shuffle the data via training) - for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True): - MLP.train() # enable dropout - with tf.GradientTape() as tape: - ## compute outputs - _logits = MLP(X_batch) - ## compute loss and update model - _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') - grad = tape.gradient(_loss, train_weights) - optimizer.apply_gradients(zip(grad, train_weights)) - - ## use training and evaluation sets to evaluate the model every print_freq epoch - if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - MLP.eval() # disable dropout - print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) - train_loss, train_acc, n_iter = 0, 0, 0 - for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False): - _logits = MLP(X_batch) - train_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') - train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_iter += 1 - print(" train loss: {}".format(train_loss / n_iter)) - print(" train acc: {}".format(train_acc / n_iter)) - - val_loss, val_acc, n_iter = 0, 0, 0 - for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False): - _logits = MLP(X_batch) # is_train=False, disable dropout - val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') - val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_iter += 1 - print(" val loss: {}".format(val_loss / n_iter)) - print(" val acc: {}".format(val_acc / n_iter)) - -## use testing data to evaluate the model -MLP.eval() -test_loss, test_acc, n_iter = 0, 0, 0 -for X_batch, y_batch in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False): - _logits = MLP(X_batch) - test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') - test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_iter += 1 -print(" test loss: {}".format(test_loss / n_iter)) -print(" test acc: {}".format(test_acc / n_iter)) diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_static_2.py b/examples/basic_tutorials/tutorial_mnist_mlp_static_2.py deleted file mode 100644 index a4110eafb..000000000 --- a/examples/basic_tutorials/tutorial_mnist_mlp_static_2.py +++ /dev/null @@ -1,99 +0,0 @@ -import time - -import numpy as np -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer.layers import Dense, Dropout, Input -from tensorlayer.models import Model - -## enable debug logging -tl.logging.set_verbosity(tl.logging.DEBUG) - -## prepare MNIST data -X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) - -## define the network -# the softmax is implemented internally in 
tl.cost.cross_entropy(y, y_) to -# speed up computation, so we use identity here. -# see tf.nn.sparse_softmax_cross_entropy_with_logits() - - -def hidden_model(inputs_shape): - ni = Input(inputs_shape) - nn = Dropout(keep=0.8)(ni) - nn = Dense(n_units=800, act=tf.nn.relu)(nn) - nn = Dropout(keep=0.8)(nn) - nn = Dense(n_units=800, act=tf.nn.relu)(nn) - - return Model(inputs=ni, outputs=nn, name="mlp_hidden") - - -def get_model(inputs_shape, hmodel): - hidden = hmodel.as_layer() - ni = Input(inputs_shape) - nn = hidden(ni) - nn = Dropout(keep=0.8)(nn) - nn = Dense(n_units=10, act=tf.nn.relu)(nn) - - return Model(inputs=ni, outputs=nn, name="mlp") - - -MLP_hidden = hidden_model([None, 784]) -MLP = get_model([None, 784], MLP_hidden) -# MLP.print_layers() -# MLP.print_weights() - -## start training -n_epoch = 500 -batch_size = 500 -print_freq = 5 -train_weights = MLP.trainable_weights -optimizer = tf.optimizers.Adam(lr=0.0001) - -## the following code can help you understand SGD deeply -for epoch in range(n_epoch): ## iterate the dataset n_epoch times - start_time = time.time() - ## iterate over the entire training set once (shuffle the data via training) - for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True): - MLP.train() # enable dropout - with tf.GradientTape() as tape: - ## compute outputs - _logits = MLP(X_batch) # alternatively, you can use MLP(x, is_train=True) and remove MLP.train() - ## compute loss and update model - _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') - grad = tape.gradient(_loss, train_weights) - optimizer.apply_gradients(zip(grad, train_weights)) - - ## use training and evaluation sets to evaluate the model every print_freq epoch - if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - MLP.eval() # disable dropout - print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) - train_loss, train_acc, n_iter = 0, 0, 0 - for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False): - - _logits = MLP(X_batch) # alternatively, you can use MLP(x, is_train=False) and remove MLP.eval() - train_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') - train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_iter += 1 - print(" train loss: {}".format(train_loss / n_iter)) - print(" train acc: {}".format(train_acc / n_iter)) - val_loss, val_acc, n_iter = 0, 0, 0 - for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False): - _logits = MLP(X_batch) # is_train=False, disable dropout - val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') - val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_iter += 1 - print(" val loss: {}".format(val_loss / n_iter)) - print(" val acc: {}".format(val_acc / n_iter)) - -## use testing data to evaluate the model -MLP.eval() -test_loss, test_acc, n_iter = 0, 0, 0 -for X_batch, y_batch in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False): - _logits = MLP(X_batch) - test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') - test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_iter += 1 -print(" test loss: {}".format(test_loss / n_iter)) -print(" test acc: {}".format(test_acc / n_iter)) diff --git a/examples/basic_tutorials/tutorial_mnist_siamese.py b/examples/basic_tutorials/tutorial_mnist_siamese.py deleted file mode 100644 index 236a40542..000000000 --- a/examples/basic_tutorials/tutorial_mnist_siamese.py +++ /dev/null @@ 
-1,142 +0,0 @@ -'''Trains a Siamese MLP on pairs of digits from the MNIST dataset. -Get 96.7% accuracy on test data after 20 epochs training. - -For more details, see the reference paper. - -# References -- Dimensionality Reduction by Learning an Invariant Mapping - http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf - - -''' - -import random -import time - -import numpy as np -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer.layers import Dense, Dropout, Flatten, Input -from tensorlayer.models import Model - -num_classes = 10 -epochs = 20 -batch_size = 128 -input_shape = (None, 784) - - -def contrastive_loss(label, feature1, feature2): - margin = 1.0 - eucd = tf.sqrt(tf.reduce_sum(tf.square(feature1 - feature2), axis=1)) - return tf.reduce_mean(label * tf.square(eucd) + (1 - label) * tf.square(tf.maximum(margin - eucd, 0))) - - -def compute_accuracy(label, feature1, feature2): - eucd = tf.sqrt(tf.reduce_sum((feature1 - feature2)**2, axis=1)) - pred = tf.cast(eucd < 0.5, label.dtype) - return tf.reduce_mean(tf.cast(tf.equal(pred, label), tf.float32)) - - -def create_base_network(input_shape): - '''Base network to be shared (eq. to feature extraction). - ''' - input = Input(shape=input_shape) - x = Flatten()(input) - x = Dense(128, act=tf.nn.relu)(x) - x = Dropout(0.9)(x) - x = Dense(128, act=tf.nn.relu)(x) - x = Dropout(0.9)(x) - x = Dense(128, act=tf.nn.relu)(x) - return Model(input, x) - - -def get_siamese_network(input_shape): - """Create siamese network with shared base network as layer - """ - base_layer = create_base_network(input_shape).as_layer() - - ni_1 = Input(input_shape) - ni_2 = Input(input_shape) - nn_1 = base_layer(ni_1) - nn_2 = base_layer(ni_2) - return Model(inputs=[ni_1, ni_2], outputs=[nn_1, nn_2]) - - -def create_pairs(x, digit_indices): - pairs = [] - labels = [] - n = min([len(digit_indices[d]) for d in range(num_classes)]) - 1 - for d in range(num_classes): - for i in range(n): - z1, z2 = digit_indices[d][i], digit_indices[d][i + 1] - pairs += [[x[z1], x[z2]]] - inc = random.randrange(1, num_classes) - dn = (d + inc) % num_classes - z1, z2 = digit_indices[d][i], digit_indices[dn][i] - pairs += [[x[z1], x[z2]]] - labels += [1, 0] - return np.array(pairs), np.array(labels).astype(np.float32) - - -# get network -model = get_siamese_network(input_shape) - -# create training+val+test positive and negative pairs -X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) - -digit_indices = [np.where(y_train == i)[0] for i in range(num_classes)] -tr_pairs, tr_y = create_pairs(X_train, digit_indices) - -digit_indices = [np.where(y_val == i)[0] for i in range(num_classes)] -val_pairs, val_y = create_pairs(X_val, digit_indices) - -digit_indices = [np.where(y_test == i)[0] for i in range(num_classes)] -te_pairs, te_y = create_pairs(X_test, digit_indices) - -# training settings -print_freq = 5 -train_weights = model.trainable_weights -optimizer = tf.optimizers.RMSprop() - - -@tf.function -def train_step(X_batch, y_batch): - with tf.GradientTape() as tape: - _out1, _out2 = model([X_batch[:, 0, :], X_batch[:, 1, :]]) - _loss = contrastive_loss(y_batch, _out1, _out2) - - grad = tape.gradient(_loss, train_weights) - optimizer.apply_gradients(zip(grad, train_weights)) - - _acc = compute_accuracy(y_batch, _out1, _out2) - return _loss, _acc - - -# begin training -for epoch in range(epochs): - start_time = time.time() - - train_loss, train_acc, n_iter = 0, 0, 0 - model.train() # enable dropout - for X_batch, y_batch 
diff --git a/examples/basic_tutorials/tutorial_mnist_simple.py b/examples/basic_tutorials/tutorial_mnist_simple.py
index b1ccd052b..ad0c4685b 100644
--- a/examples/basic_tutorials/tutorial_mnist_simple.py
+++ b/examples/basic_tutorials/tutorial_mnist_simple.py
@@ -1,61 +1,67 @@
-#! /usr/bin/python
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-

 import numpy as np
-import tensorflow as tf
+import time
+import os
+os.environ['TL_BACKEND'] = 'tensorflow'
+# os.environ['TL_BACKEND'] = 'mindspore'

+import tensorflow as tf
 import tensorlayer as tl
+from tensorlayer.layers import Module
+from tensorlayer.layers import Dense, Dropout

-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-# set gpu mem fraction or allow growth
-# tl.utils.set_gpu_fraction()
-
-# prepare data
 X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))

-# define the network
-ni = tl.layers.Input([None, 784])
-nn = tl.layers.Dropout(keep=0.8)(ni)
-nn = tl.layers.Dense(n_units=800, act=tf.nn.relu)(nn)
-nn = tl.layers.Dropout(keep=0.5)(nn)
-nn = tl.layers.Dense(n_units=800, act=tf.nn.relu)(nn)
-nn = tl.layers.Dropout(keep=0.5)(nn)
-nn = tl.layers.Dense(n_units=10, act=None)(nn)
-network = tl.models.Model(inputs=ni, outputs=nn, name="mlp")
-
-
-# define metric.
-def acc(_logits, y_batch):
-    # return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
-    return tf.reduce_mean(
-        tf.cast(tf.equal(tf.argmax(_logits, 1), tf.convert_to_tensor(y_batch, tf.int64)), tf.float32), name='accuracy'
-    )
-
-
-# print network information
-print(network)
-
-# open tensorboard
-# tl.utils.open_tensorboard('./tb_log', port=6006)
-
-# train the network
-tl.utils.fit(
-    network, train_op=tf.optimizers.Adam(learning_rate=0.0001), cost=tl.cost.cross_entropy, X_train=X_train,
-    y_train=y_train, acc=acc, batch_size=256, n_epoch=20, X_val=X_val, y_val=y_val, eval_train=True,
-    tensorboard_dir='./tb_log'
-)
-
-# test
-tl.utils.test(network, acc, X_test, y_test, batch_size=None, cost=tl.cost.cross_entropy)
-
-# evaluation
-_logits = tl.utils.predict(network, X_test)
-y_pred = np.argmax(_logits, 1)
-tl.utils.evaluation(y_test, y_pred, n_classes=10)
-
-# save network weights
-network.save_weights('model.h5')
-# close tensorboard
-# tl.utils.exit_tensorflow(port=6006)
+class CustomModel(Module):
+
+    def __init__(self):
+        super(CustomModel, self).__init__()
+        self.dropout1 = Dropout(keep=0.8)
+        self.dense1 = Dense(n_units=800, act=tl.ReLU, in_channels=784)
+        self.dropout2 = Dropout(keep=0.8)
+        self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800)
+        self.dropout3 = Dropout(keep=0.8)
+        # NOTE: the deleted TF1.x-style network above used act=None on the output
+        # layer, so cross_entropy received raw logits; here a ReLU is applied instead.
+        self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800)
+
+    def forward(self, x, foo=None):
+        z = self.dropout1(x)
+        z = self.dense1(z)
+        # z = self.bn(z)
+        z = self.dropout2(z)
+        z = self.dense2(z)
+        z = self.dropout3(z)
+        out = self.dense3(z)
+        if foo is not None:
+            out = tl.ops.relu(out)
+        return out
+
+
+def generator_train():
+    inputs = X_train
+    targets = y_train
+    if len(inputs) != len(targets):
+        raise AssertionError("The length of inputs and targets should be equal")
+    for _input, _target in zip(inputs, targets):
+        yield _input, _target
+
+
+MLP = CustomModel()
+
+n_epoch = 50
+batch_size = 128
+print_freq = 2
+shuffle_buffer_size = 128
+
+train_weights = MLP.trainable_weights
+optimizer = tl.optimizers.Momentum(0.05, 0.9)
+train_ds = tf.data.Dataset.from_generator(generator_train, output_types=(tf.float32, tf.int32))
+train_ds = train_ds.shuffle(shuffle_buffer_size)
+train_ds = train_ds.batch(batch_size)
+
+model = tl.models.Model(network=MLP, loss_fn=tl.cost.cross_entropy, optimizer=optimizer)
+model.train(n_epoch=n_epoch, train_dataset=train_ds, print_freq=print_freq, print_train_batch=False)
+model.save_weights('./model.npz', format='npz_dict')
+model.load_weights('./model.npz', format='npz_dict')
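The rewritten tutorial saves and reloads the weights but no longer evaluates on the test split the way the deleted version did with `tl.utils.test`. A minimal evaluation sketch, an editor's addition rather than part of the patch: it assumes the trained `MLP` Module is directly callable (the commented-out MindSpore loop further below calls `output = MLP(X_batch)` the same way) and reuses `tl.iterate.minibatches` from the deleted siamese tutorial above.

# Hypothetical evaluation sketch -- not part of the patch.
test_correct, test_total = 0, 0
for X_batch, y_batch in tl.iterate.minibatches(X_test, y_test, batch_size=128, shuffle=False):
    _logits = MLP(X_batch.astype('float32'))   # forward pass; dropout mode is left at the Module default
    test_correct += int(np.sum(np.argmax(_logits, 1) == y_batch))
    test_total += len(y_batch)
print("test acc: {}".format(test_correct / test_total))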
diff --git a/examples/basic_tutorials/tutorial_ms_cifar10_simple.py b/examples/basic_tutorials/tutorial_ms_cifar10_simple.py
new file mode 100644
index 000000000..a31e99b64
--- /dev/null
+++ b/examples/basic_tutorials/tutorial_ms_cifar10_simple.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# NOTE: despite the file name, this example currently trains an MLP on MNIST
+# (tl.files.load_mnist_dataset below), not on CIFAR-10.
+
+import numpy as np
+import time
+import os
+# os.environ['TL_BACKEND'] = 'tensorflow'
+os.environ['TL_BACKEND'] = 'mindspore'
+
+import tensorflow as tf
+import tensorlayer as tl
+from tensorlayer.layers import Module
+from tensorlayer.layers import Dense, Dropout
+
+from mindspore.common import ParameterTuple
+import mindspore as ms
+import mindspore.dataset as ds
+from mindspore.ops import composite
+from mindspore.ops import operations as P
+from mindspore.ops import functional as F
+import mindspore.dataset.transforms.vision.c_transforms as C
+import mindspore.dataset.transforms.c_transforms as C2
+from mindspore.nn import SoftmaxCrossEntropyWithLogits, Momentum, TrainOneStepCell, WithLossCell
+from mindspore.parallel._utils import (_get_device_num, _get_mirror_mean, _get_parallel_mode)
+from mindspore.train.parallel_utils import ParallelMode
+from mindspore.nn.wrap import DistributedGradReducer
+from mindspore import context
+context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
+
+
+class CustomModel(Module):
+
+    def __init__(self):
+        super(CustomModel, self).__init__()
+        self.dropout1 = Dropout(keep=0.8)
+        self.dense1 = Dense(n_units=800, in_channels=784, act=tl.ReLU)
+        self.dropout2 = Dropout(keep=0.8)
+        self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800)
+        self.dropout3 = Dropout(keep=0.8)
+        self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800)
+
+    def forward(self, x, foo=None):
+        z = self.dropout1(x)
+        z = self.dense1(z)
+        z = self.dropout2(z)
+        z = self.dense2(z)
+        z = self.dropout3(z)
+        out = self.dense3(z)
+        if foo is not None:
+            out = tl.ops.relu(out)
+        return out
+
+
+class WithLoss(Module):
+
+    def __init__(self, backbone, loss_fn):
+        super(WithLoss, self).__init__()
+        self._backbone = backbone
+        self._loss_fn = loss_fn
+
+    def construct(self, data, label):
+        out = self._backbone(data)
+        return self._loss_fn(out, label)
+
+    @property
+    def backbone_network(self):
+        return self._backbone
+
+
+class GradWrap(Module):
+    """ GradWrap definition """
+
+    def __init__(self, network):
+        super(GradWrap, self).__init__(auto_prefix=False)
+        self.network = network
+        self.trainable = True
+        self.weights = ParameterTuple(network.trainable_weights)
+
+    def construct(self, x, label):
+        weights = self.weights
+        return composite.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label)
+
+
+class TrainOneStep(Module):
+
+    def __init__(self, network, optimizer, sens=1.0):
+        super(TrainOneStep, self).__init__(auto_prefix=False)
+        self._built = True
+        self.trainable = True
+        self.network = network
+        self.network.set_grad()
+        self.network.add_flags(defer_inline=True)
+        self.weights = optimizer.parameters
+        self.optimizer = optimizer
+        self.grad = composite.GradOperation('grad', get_by_list=True, sens_param=True)
+        self.sens = sens
+        self.reducer_flag = False
+        self.grad_reducer = None
+        parallel_mode = _get_parallel_mode()
+        if parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL):
+            self.reducer_flag = True
+        if self.reducer_flag:
+            mean = _get_mirror_mean()
+            degree = _get_device_num()
+            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree)
+
+    def construct(self, data, label):
+        weights = self.weights
+        loss = self.network(data, label)
+        sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
+        grads = self.grad(self.network, weights)(data, label, sens)
+        if self.reducer_flag:
+            # apply grad reducer on grads
+            grads = self.grad_reducer(grads)
+        return F.depend(loss, self.optimizer(grads))
+
+
+def generator_train():
+    inputs = X_train
+    targets = y_train
+    if len(inputs) != len(targets):
+        raise AssertionError("The length of inputs and targets should be equal")
+    for _input, _target in zip(inputs, targets):
+        yield _input, _target
+
+
+MLP = CustomModel()
+train_weights = MLP.trainable_weights
+
+X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
+train_ds = tf.data.Dataset.from_generator(generator_train, output_types=(tf.float32, tf.int32))
+
+opt = Momentum(train_weights, 0.01, 0.9)
+n_epoch = 50
+batch_size = 128
+print_freq = 2
+train_ds = train_ds.batch(batch_size)  # batch the generator output; without this the loop below iterates sample-by-sample
+model = tl.models.Model(network=MLP, loss_fn=SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True), optimizer=opt)
+model.train(n_epoch=n_epoch, train_dataset=train_ds, print_freq=print_freq, print_train_batch=False)
+
+# batch_size = 128
+# epoch = 50
+#
+# # loss function definition
+# ls = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+# # optimization definition
+# opt = Momentum(train_weights, 0.01, 0.9)
+# net_with_criterion = WithLoss(MLP, ls)
+# # train_network = TrainOneStep(net_with_criterion, opt)  # optimizer
+# train_network = GradWrap(net_with_criterion)
+# acc = ms.nn.Accuracy()
+#
+# for epoch in range(epoch):
+#     MLP.set_train()
+#     for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
+#         X_batch = ms.Tensor(X_batch, dtype=ms.float32)
+#         y_batch = ms.Tensor(y_batch, dtype=ms.int32)
+#         output = MLP(X_batch)
+#         loss_output = ls(output, y_batch)
+#         grads = train_network(X_batch, y_batch)
+#         success = opt(grads)
+#         loss = loss_output.asnumpy()
+#         accuracy = acc()
+#         print(loss)
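The file above defines a `TrainOneStep` cell, but the active code path goes through `tl.models.Model(...).train(...)` and the manual loop at the bottom stays commented out. As a sketch of how `TrainOneStep` would be exercised, an editor's addition following the names used in that commented-out block, not part of the patch:

# Hypothetical usage of the TrainOneStep cell defined above -- not part of the patch.
ls = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
train_net = TrainOneStep(WithLoss(MLP, ls), Momentum(train_weights, 0.01, 0.9))
for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
    # construct() runs forward, loss, backward and the optimizer update in one step
    loss = train_net(ms.Tensor(X_batch, dtype=ms.float32), ms.Tensor(y_batch, dtype=ms.int32))
    print(loss.asnumpy())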
diff --git a/examples/basic_tutorials/tutorial_tensorlayer_mindspore.py b/examples/basic_tutorials/tutorial_tensorlayer_mindspore.py
new file mode 100644
index 000000000..e4802211e
--- /dev/null
+++ b/examples/basic_tutorials/tutorial_tensorlayer_mindspore.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+import mindspore.nn as nn
+import mindspore.ops.operations as P
+from mindspore.ops import composite as C
+from mindspore.common import dtype as mstype
+from mindspore import context, Tensor, ParameterTuple
+from mindspore.common.initializer import TruncatedNormal
+from mindspore.nn import SoftmaxCrossEntropyWithLogits, Momentum, WithLossCell
+
+import numpy as np
+import tensorlayer as tl
+import mindspore as ms
+import tensorflow as tf
+import time
+from tensorlayer.layers import Module
+from tensorlayer.layers import Dense
+
+
+class MLP(Module):
+
+    def __init__(self):
+        super(MLP, self).__init__()
+        self.dense1 = Dense(n_units=800, act=tl.ReLU, in_channels=784)
+        self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800)
+        self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800)
+
+    def forward(self, x):
+        z = self.dense1(x)
+        z = self.dense2(z)
+        out = self.dense3(z)
+        return out
+
+
+class GradWrap(Module):
+    """ GradWrap definition """
+
+    def __init__(self, network):
+        super(GradWrap, self).__init__(auto_prefix=False)
+        self.network = network
+        self.weights = ParameterTuple(filter(lambda x: x.requires_grad, network.get_parameters()))
+
+    def forward(self, x, label):
+        return C.GradOperation(get_by_list=True)(self.network, self.weights)(x, label)
+
+
+def generator_train():
+    inputs = X_train
+    targets = y_train
+    if len(inputs) != len(targets):
+        raise AssertionError("The length of inputs and targets should be equal")
+    for _input, _target in zip(inputs, targets):
+        yield _input, _target
+
+
+net = MLP()
+train_weights = list(filter(lambda x: x.requires_grad, net.get_parameters()))
+optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.15, 0.8)
+
+criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
+net_with_criterion = WithLossCell(net, criterion)
+train_network = GradWrap(net_with_criterion)
+train_network.set_train()
+
+X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
+train_ds = tf.data.Dataset.from_generator(generator_train, output_types=(tf.float32, tf.int32))
+shuffle_buffer_size = 128
+batch_size = 128
+train_ds = train_ds.shuffle(shuffle_buffer_size)
+train_ds = train_ds.batch(batch_size)
+n_epoch = 50
+
+for epoch in range(n_epoch):
+    start_time = time.time()
+    train_network.set_train()
+    train_loss, train_acc, n_iter = 0, 0, 0
+    for X_batch, y_batch in train_ds:
+        X_batch = ms.Tensor(X_batch.numpy(), dtype=ms.float32)
+        y_batch = ms.Tensor(y_batch.numpy(), dtype=ms.int32)
+        output = net(X_batch)
+        loss_output = criterion(output, y_batch)
+        grads = train_network(X_batch, y_batch)
+        success = optimizer(grads)
+        loss = loss_output.asnumpy()
+        train_loss += loss
+        n_iter += 1
+        train_acc += np.mean((P.Equal()(P.Argmax(axis=1)(output), y_batch).asnumpy()))
+    print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
+    print("   train loss: {}".format(train_loss / n_iter))
+    print("   train acc:  {}".format(train_acc / n_iter))
+    print(" loss ", loss)
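The loop above reports only training accuracy. A test-set pass can reuse the same `P.Argmax`/`P.Equal` pattern; this MLP has no dropout layers, so no train/eval mode switch is strictly needed. An editor's sketch, not part of the patch:

# Hypothetical test evaluation -- not part of the patch.
test_acc, n_iter = 0, 0
for X_batch, y_batch in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False):
    output = net(ms.Tensor(X_batch, dtype=ms.float32))
    y_batch = ms.Tensor(y_batch, dtype=ms.int32)
    test_acc += np.mean(P.Equal()(P.Argmax(axis=1)(output), y_batch).asnumpy())
    n_iter += 1
print("   test acc: {}".format(test_acc / n_iter))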
diff --git a/examples/data_process/README.md b/examples/data_process/README.md
deleted file mode 100644
index 85888b3ac..000000000
--- a/examples/data_process/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-The examples shown here are not the best; `tl.prepro.threading_data` is for quick testing.
-The state-of-the-art method is TensorFlow's `tf.data` and `tf.image`.
-We will change all examples later.
-
-Please use `basic_tutorials/tutorial_cifar10_datasetapi.py`.
-
-
-### Blogs
-
-- [How to do data augmentation for object detection with TensorLayer](https://zhuanlan.zhihu.com/p/31466173)
\ No newline at end of file
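The deleted README points readers to `tf.data` and `tf.image` for input pipelines. For reference, a minimal sketch of that style, an editor's addition and not part of the patch; `images` and `labels` are assumed to be in-memory arrays, and the two augmentations shown are illustrative only:

# Hypothetical tf.data / tf.image augmentation pipeline -- not part of the patch.
import tensorflow as tf

def _augment(image, label):
    image = tf.image.random_flip_left_right(image)             # random horizontal flip
    image = tf.image.random_brightness(image, max_delta=0.1)   # mild brightness jitter
    return image, label

ds = tf.data.Dataset.from_tensor_slices((images, labels))      # `images`, `labels`: assumed in-memory arrays
ds = ds.map(_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(32)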
diff --git a/examples/data_process/data/.DS_Store b/examples/data_process/data/.DS_Store
deleted file mode 100644
index f9462214891db31ead3e739342e3706e527c3c6d..0000000000000000000000000000000000000000
Binary files a/examples/data_process/data/.DS_Store and /dev/null differ
[binary patch data omitted; the diff header for cat/img1.jpg was lost in extraction]
diff --git a/examples/data_process/data/cat/img2.jpg b/examples/data_process/data/cat/img2.jpg
deleted file mode 100644
index 8ec9afe7340bf8dd21401f0a5469685b46311712..0000000000000000000000000000000000000000
Binary files a/examples/data_process/data/cat/img2.jpg and /dev/null differ
diff --git a/examples/data_process/data/cat/img3.jpg b/examples/data_process/data/cat/img3.jpg
deleted file mode 100644
index 3b1f85e0548ce38efbdcd555ad828d63d3d3b2ce..0000000000000000000000000000000000000000
Binary files a/examples/data_process/data/cat/img3.jpg and /dev/null differ
[binary patch data for cat/img4.jpg omitted]
diff --git a/examples/data_process/data/cat/img5.jpg b/examples/data_process/data/cat/img5.jpg
deleted file mode 100644
index 03954c45cd8b0f280b569df7fbd5fbe51bf9e004..0000000000000000000000000000000000000000
Binary files a/examples/data_process/data/cat/img5.jpg and /dev/null differ
diff --git a/examples/data_process/data/cat/img6.jpg b/examples/data_process/data/cat/img6.jpg
deleted file mode 100644
index 6e5ade90d96698776a3eff30eab0a1d1e98f7d3c..0000000000000000000000000000000000000000
Binary files a/examples/data_process/data/cat/img6.jpg and /dev/null differ
diff --git a/examples/data_process/data/cat/img7.jpg b/examples/data_process/data/cat/img7.jpg
deleted file mode 100644
index 425733c158612f256cfc81bbad3c393dc4c6b044..0000000000000000000000000000000000000000
Binary files a/examples/data_process/data/cat/img7.jpg and /dev/null differ
diff --git a/examples/data_process/data/cat/img8.jpg b/examples/data_process/data/cat/img8.jpg
deleted file mode 100644
index 3c2cf1f8f1c00eb23b9af690236c17e3945f5d42..0000000000000000000000000000000000000000
Binary files a/examples/data_process/data/cat/img8.jpg and /dev/null differ
[binary patch data for cat/img9.jpg omitted]
diff --git a/examples/data_process/data/cat_caption.json b/examples/data_process/data/cat_caption.json
deleted file mode 100644
index 6c8cce3ba..000000000
--- a/examples/data_process/data/cat_caption.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-    "info": {"description": "This is a cat dataset.",
-             "url": "http://tensorlayer.org",
-             "version": "1.0.0",
-             "year": 2016,
-             "contributor": "Hao Dong",
-             "date_created": "2016-09-30 12:11:50.00000"},
-    "images":[
-        {"file_name":"img1.jpg", "caption":"a yellow cat looks up"},
-        {"file_name":"img2.jpg", "caption":"a grey cat with yellow eyes"},
-        {"file_name":"img3.jpg", "caption":"grey cat with yellow eyes"},
-        {"file_name":"img4.jpg", "caption":"yellow cat looks at you"},
-        {"file_name":"img5.jpg", "caption":"a yellow cat stands up"},
-        {"file_name":"img6.jpg", "caption":"a small cat sits down"},
-        {"file_name":"img7.jpg", "caption":"it is a white cat with white eyes"},
-        {"file_name":"img8.jpg", "caption":"it shows a head of cat"},
-        {"file_name":"img9.jpg", "caption":"a black cat is running very fast"}
-    ]
-}
diff --git a/examples/data_process/data/dog/img1.jpg b/examples/data_process/data/dog/img1.jpg
deleted file mode 100644
index cbafc08a861945ba9b853022d7170d20828178d6..0000000000000000000000000000000000000000
Binary files a/examples/data_process/data/dog/img1.jpg and /dev/null differ
[binary patch data for dog/img2.jpg omitted]
diff --git a/examples/data_process/data/dog/img3.jpg b/examples/data_process/data/dog/img3.jpg
deleted file mode 100644
index e2fa7407136b229482de4769d59581093949b0e1..0000000000000000000000000000000000000000
Binary files a/examples/data_process/data/dog/img3.jpg and /dev/null differ
zHg7ZMMCZR;uK;=(z|$9&9u{GN}SNd=+ZAQ{+&Mq-t?q)}LC9Epr`lAKJvZk}12A6cl? z@(0AH?DK+*g6=0Dg(wK*f@m5~_T61V8pqqjWGFS7&cfm*+_LhMe~#c&qEYL4!Vkp9 zWLr6Hz6s-bAt)ymEgj?=jzcx8w1-Ba3iO4T1HLI|SYh)_n0tR1bap+Jq53(FBBRAGv_jR_SsL>)$SceS_05s zVOb-Q=u1A1=+Er3rp@_brFoqpw?)J-d-bjJCz4)T-})-J6t^dpf4%`<)C7o2ZrKxhFERyX>4_h-6VS}}fHd9sScW*>#&+V84H*6~ zrv6N~2O;S5=DB9U(*nNHA}*%Wp5(qX5aD=CU`?O?VS^H9ZHD@C$r-rrWl%FG@}RnM z$vy`;vZN3ff0IcaSZ~VRlj_Vp&iO^eMZB?Z)`=+6a)nJm6Bb2Y+(auP=Ts*75;RVU zHOlz}!F-zH@70qbiBBmZ>W%f*A3U^xJ8Z&x`3>(-1nsb7bVy>yo_q`NmR%q1E^&R6 zXz34+e#9h=(lgp%NWZ3j322~mt<_=|P$brce z$BO!(W|{A&33(zivDT=!`HBT2I+nF7=@NkaW-#5B{hxwBf8GN95c=N||IaJC z`%-?7>Y2s+DAO-|_Ea9-HoLRQ*_i_jAEpZpb(L~^6Rv#3=8M@mUgqeQI#HCDdCUxtP@kDH;_&@-MrQ* z=Na#fhhn^vtYXgSi-D3j#Z*ZB(9nzJJi(a+s`tYO0*Eh7d*fZjR`SB34z``QL9wHj2CmhPdL#yfA3lLQgpQ5iyo`B^X@}~(fnJ*8nFXsFG-Il0 zUtO82?~5(aY~U$Z$3i1ZVc&un6^a_h{%M9C=)KY>u*asz`f#x~GhYZYie#hE^}fRI`~-_Q$b8)?tldQ^SM)>T z^ViTze4C}nq0~82;oq}n9yF9siO}{Os*4gV{O?x+l!gzfjzi_Ke$^P#_wjjMbXx)L%z z4{45lgrEuB{%CU@*46SUn5~HyZ-+-}*z;2_WNKt|(@-T}{b6q23&642=rb$sLcv-^ z*z|wP?aK8lz{x|+q38G{_owH^PPKV=G;5w%r|#VCr%dtU`>n$1^@&C0KlIGa5@cm z060SjfACgER5itn~ z2@bx?a+guE!YBzb;MuchnVFbjU@%OKmzNiX68l+xf!J>a^;3S^e<`v*jsH<)QK(;4 z?6esGp$8CvaB83cfC>Vnh5%370X)B^Nc(H5e+!73hL(<=>Q}ES7yzWEq5{#R-_zzuKw)cP9`+^@k*@ir48BfRXxVGzc{WpbW?|fB+$2h?$GK zw4OG%)3X#i8Jw*7YC<#uk$=l@-I$^G*-h=mEu7&3$hVU~qbmSRO$3vPbOog~d3#i2K{qz9+hAqXce#3G zHdW!7AYsui&YmNo&gVSyDm?50?tJ))`NJ8qmttRk@I$4**VSdtTyMtt!L++85A=d6 z%e`Bt0v)|V*$DJ!JR3eH&!6dNB$7#o3u;0aB=pDLini%anOR+zTx>pr6pu(C?OwK| ztwOmZHbu!w<&2#p%4hgT*W_8P2Ka5?wJz9wI&A>qg8W|i{|_`=wW7nP0B~g9G7mhO z8aW&LL&r7-i=>%bm`|bjO)s!wgTx5a)eM^rccMHPR zdpFZ1+fLX62cEu<@x#n%u^1bzTCFU4pA$ZF(YDQ2z2NHy~ zR;&5^85b}2<^>O2tRl7}iPiYPO~=guxTyfm{^$wc_C8;2QnW zePi%#pWm^#2W!N+SpL#!_Z%7{d*Ueor_ZDMjjlQ@q$l0Zw^^p9S?0=x=7Pl`0fE1q zMZije-qE@#CR~@44OZ{&^`FDm@p+N+EPY2bg!$$glmGnY$RxuTrLXw7K}qT3;Mz6u zx+RgK2M&#rj&r)TV2%DO=?_;tyU2?46_y^i3O_ypifbn`OeiM!mZ#K^G^NLxo=A_U z#UV+ctfX%q^40sko#9W%$MR!~B0BXed=wfzdL~i1+Rm%(i)@wkN$|G&G~RXJx5P|~ zqexvBq!6*2bM7HboH;?>yt;n8EJ7wkN%3@?wsR|K*SYBw5V*H@+gX$%%tIpfCtLP4 z1q3*>l7HZLYq*`ZuZcA;&?fGr$EM2|S9>-K7xOvB(L4B0@^J13cwdp_DRs7n#-!AR zqz((RAjRto#%&A2q%S)kNX5J9DK=8`BivRJf#u5s5kozi1DOVc!%LrnK_`<9h21~; z8%0U7MkZjFH%^(C3OEotsnH9@dlt;r^DU4ANMLaD+|`|K5Z)|Qs8mY--YsJW7IEv` zAqkU8Jh8s8u!&l*bNe#=L2?bF$69{IY+dd~k!pR@wa{jk*p_}`D8HU>g_Jj>?%2X; z^m|#jYG1V5pITqqm3LSUZt6gZuk-G{5yta9kdGc=RqQU_Gkk-BUvt-tvN=e+!+dN9 z4au3NY*ZeN*7P`Y-_leFl#>ge=)9FRyP#;ST{>=7eaxo+?4@tiO10yv!d7LY-ma%c3{4k-nti}xScH&dvN0 z42Fy+!)z*6GhTIN&~1!on6k=>=AW$d?~gsU`CK`Q_N&8e#R*fwG~0&b^)4)cjW#O| z)%Q$2P2V%+GA6}mugKQd?zx!2^ytPGnz#snNtLR5{Zr^#mi_Lq+WGzi4>RswG4U6> z-GVQ(l6+19AB+okxx_s6EoH2Dpxk*d6SSt-dhE_aBeuRP9NkC)3-JB0#kB)HPC;+@ z==()Zo#ddGMTDl*D9o+c*7kMSqr#H<)5+UKCW^T<+-LB zyR7Is=$Q7x+&}RcCvXY~@W3{9BxS5igWT6K6~-#UU-9wLzLLqneNTi6H#tFgsoV69 zsc5SP+MP#_GI@KjQStuxT%jhcPtLQp*9>%%xI{?pW3(l_bm-`3LkK<0uU&MoKeV(| zLY=@s#1<9Z^&Z*-+91D~68yzt&2zXVf|(mQ6hLkp`e^$(t9;hradxMtS=ofu^q^XI zWMn{LZqT9C`&tREVmZ|mj$vZE=o5i2yvoT<^0hu{+M@we++Pduym2n9vw}#Fh+VMy zaUtio;PI;x{u9q7P{M7kGU}%QKNhkjxWq2KKtQzXA9VSPJ&9Ffy5z@vEWw|n z9ngOm!4^}p-wVh&W22DMDRcagiw|kH6vq>aZ^4#qaXHi*#^P&A*OoKKbhxk24WR^a zsNkRoyKVE~+r5~{phjH|3jcG3hP-l!33)3%GU@ehzisD3eFeAi@d_`Ks-@*Xc-e;m zvo{K`ed%E4^FlYPSGr#Bvn-UmTF%yHtc)d3Q_%JyY}2nOLiReWCdr3clyf<2$9;(LG_5=1C^&GihVhQh#w24R)|}ci zO{)GmaYn@y`dL1Tm|RmkcOko0jE=BnbkpkF%ykV^eb?$em?i0p-IO@bWu6!rWg+g- zUL|inT9cDgfHBU#((7y?kWqrH*7#beyl@#cZ!7}OgaeN|`aBP%bwq`al!zk-S11^k z*;Byh)X_x$K?sBfeHD%V7l4~)Y7EcdlA{~E;nIXJJWo#nik+qPsSINz2CuLSU&Cg} z9JjIO;@pkJN492>^t`-pANVo@zDwvzE%E8A?DOy4sO=;wt5rjDkq)5@5~U7Fzy=oo 
zXk8ntnszgb%($7=rP!)h(HGU2KIhk@d0hRxsp^&(oBcFvjEn^1Oqm4J!E%xH18!C| zZR>oDmXh+FhpKPakmdnZ-A?y=ZeG{Hz+ReX|_Pjtw){P-aXG= zuJ++G#mq;fai5!7^_I%|3OWVo82%8u*Tse}2n|#C?Ai$wy!-q??xQrA?U8^0|6d#? zKkPQeX7~w;#IWnq6s9RVs(hBbJ2hz45)WT~>bYh#xgQbF9c1U$Hg7l7`>tCJE;3M1 ztkK@8kaB|k(vDG~?(Hc2@aoeA9dEM_dUrfrSnT4g#u>mEG}vDRZj;JNXl!SiSh>>Z z**eXh3sy%s+?Z}kus7a(%D`2KO&~N(Xt6Y$?Fr@k)SXjsjW!c{>n?x&ch zvBTG|%z5R8fhs*agd}+vYa6I^ZrNLEFS=(HXl=0?XptNz>UpGO#3=1GW80`l^*zN& zD$|>H=BVht8iju}#=0=Egvu%Bq7_*uLZc3QtBx@$xF;Fr`1h{I8I4lu$kT&hbg$`)#J4`b87&#CjS zQRc$_Rub4`vO?$99epLDik)I6_!K}K&V1)G{YDBQDl!rZI604pLo3;#Q@UiGcI(+g ze_7}Q1nwQ}sAv0FwRE0zDL@Kn4d?>MF$mWd#i)IP3$xx}sfoH4BJn}FW3BiEVYd7E z*@c-FPUH?NnI%uDSZ6vh9r{Uwd)%@js#V_BedZK!aTR<|G`T4WVxE>iHje}qUN~02 zD}+ZIr~FsSz@}0&>o#<;N8}@24#%aF;^4l3dKwVR=R+LVM1)tp)_Qw#&D;Q}-I9Kh z^y$o;g2b3rXFGyGH9c`XUCuVI2#Pm?V)b7?{_udm}VAP5yebR9<1D`iqK}-1>aTsd@1R z?6u%?CzR^>ld*J}*8H~}M#SaFT)0RA#P_u;9sYVD7k_-Z(lv&h&9Cm;)pt0~_hJ+b zryT5D8Kpcfs&WPMY;#&7TUyCyKn$3VZhQMVCb=(Uus?r$Aq18AWO5*OZU(*cM_J9l zk9zZ>q&lC6a+L9NOEX`@3jIJ=zd9fGY^z~KP&5va30N;?f4Rt0z}fdV+{!T+0NX+s zj(Q&1aTF~Vu~vIl(~(3XHFJ}~#;O4TYJLDbJkFKyf8{(sZ(@m5jiC2s#(3n?-FgYn7sk*tF3awfCO2 zszntoZB_mH`F+plobUNQ@44skoR7!-vs008}6z~vGU4bVcU zSy}(vIY;0_QHwQb{U*|7yaB#45@`@oid9QM_u?Zjru3nRnmX_w=mqW`* zDvC)-OMvL;=%9>HZWxSPil6`Lb*cZ_e>{o*2LFTqC;Xq}B^h9*0h&M@lpsNXf*C}~ z47%(AcmRMBpalKrtp7C-1tl0lMNRX!stf}_lprt|0;U3}|9=nY@2CJJ9I0VW1$Od} zfINNHJ@J)AP`3VqriJqnHLFlSWW(D@xxcel{`UVR{x9o)5JX8u0sdPOV*X1*0iya} zx_?EP;Zy=h4MB5%r>88JivR=Vf3zr>0d?TlFJ>`}riWobc=R)PG!zz@iW!fNgv1~! zU?|p9I!H9tI0B{_5KSX|OMvRFVS-r%qJjcuB*>x(re%GPp+Phm_vRXS(i%}|az$iH z(}kPBoDt7lr8Mbjq9bWBY#Q1bsNxa}s3uItoF*Efp;A?529n7@ApD}=>T@w)gNI`@ zVKm}WwOSg&FYO-q?xv%-G``fOl`7odNNM|ZCy`<6cOX*>ZmbO-RB*rw@>^2$99qVX z`?{Hb%irxvUS*08geF2A>O@l*Lw?;4J>ES~%$@6XR2f;|XB))&!rjUWRL{yAeOa<7 z5>eX&YKwvyzk*fxPW<~i=i2hZn20~WH!oiTRnOxxpD&!>-t@#8PaxUgdRQqmVQ03p zaqC^mW7B%%`Z{r=fmWu%Zych7ha8 zMM%7VxV#nJc87l#I9-e3Ch;S!Iw{;^T3XibiTj<+Itgw0i|PXes|WL$y}~81WWPtT z)4_Yq)Xk&$qG?n*?QY6~?t(A7K;!K?bY>(e&p}GfuHp!Ll)@P6%%k-WPJ^(#R}2`0 zJEF{e5B5)EDU9>-ijivc*~?uejq|6@v)K=9I1C&)*3&Q^>P9;u;F@5Zx0;;8-c7p< znZ2767#*zRd$p=`aGe~I<|TGkP~?tO){F%c^NQj5gzEy|xu%xvs3W5|DKF@VsacWW zRdl9#a>#hhUElNs{!wY}m(x~dl68SpUXS;@(z#B`+}%gR8#@msM#bA0zv%S`&I6GHq<6;Ye+5bdlj(@OpH1`SmbB&tne(-seo+#@25T?K}o-=qHFX0J%k;(c6BTFHAz_BJRchKdo5?`B#KN^xcHelm8N zUsLc>ZmQ_i9PWa^HT5xMU4v*=TAJiI4J0i|Fb?xUhIL?j#L^HbyPF2m1Za&iQei3s z(a#h!s6^oUu*h;ct#n3=A*H1zz;;q}lyYzJ_2`gCJym<(j|Y$5K|*@?zJK#D_aX0O zwbS?^o@hKXN0NFz{e0MAK&azc|6zFMom4EB1pP;fqprCt>(OCL+$Es1-8ZnHs|bxY z$M?2|@mEGIR}w!vl}NQZGT`%&=&$7b@7*R#USrXmv)bG<6!tjr0KgT3pQ*Q{bUyIq z{K&>fKX}^ojCTJ$ttsFzemWa3cgNy{eexvD4JP5wV|L4{wOP4nAT(r4~f=S z{Se4@y5s%na4Dz)+heq@sPR1B`@vb6xFmU4vtE@&qQHZe~-RW@%80a<3hE z^(VxWM$(y?DOSd*b(;5^w;rvKaQ< zc;6#p27nu7EJLl%i)Vt0oa&nvYPm@$rEFt4YQYtTx4(aOwg{c-5my`iewt*GlPX`I zEOGS4u_VUcruua~$G0|nWBe1?eIVUt%39JNwUoF#OO~Ceqh@dQe|AE%nu2p1K2CWY zc(@ouaNU@cfT54EcN%v-ZnRfCZyzn?|NZ^t(aTB6(=1u6S5za_h)w28aQB&0V>lJl ztkh!k-R>fdpjvSXjz$ z?4xdv7E9kn*-=ayNJOC2!Wj6+V1M=N5BBUOI6xhz6i`xfmSFhV+S@jEqC;`PJI~2& ziCyEuG|>Kw&oOHoSpX+^M@il`0oclBRNQ&k(TQTuZYmsgFCaEEV7@Qwvo`_ z#|O>}XVjAe6uCPg(!sv<9p)x#F>VXBYA&2`gBMSpdwC#LuZAqh4lf0KuhNg3R_dzg z!BX%w<+TJf&#tYvksSc~>9TmK;4hrv+GvVv67BAv@SkPmZCn;+qe9v9jbHdQoj zz8)(&+nHorI6xU zh=)R;H#i(tx@t>)^1bV%X>KfFTDYeYw0J_rrCBPBmEy9qjgOr?Q~S=O&QNF{NbjqB zJ?-?LkfIZ7MiIAlgN@fDrqeEI;wnl=WG-?Xq(Qn=TjXlbHaJ% z!eio0gC)D+LbWQ9sW1~^Q1Qt%(x+Y4VVBJP;(U`6H`v$z#DUCG#~P3Zqx#DC!8Cn}3(xS#Qk^o7q&zv}EK26c9( zeNJYws^9LXf);)?UcC4n3H-UBaMdw02ztC0uygL+1Zhf|P9v;y7~fDmu~+x5HSMLu 
zsQ#!~xz&{Ny|m)xvC@zagqk?%J)6xi*3j(V7bT6)UCk<=CDl$LhmJcx+hnvjEXHcJ zhhg@Rw!V~R?RTZH+XU$4z1zT6s{nw}OJlu zkIwaBrP=#2qe`jv1~oZYf8FXPj)I}lJzSG7;F8;Yk6U;0gG)tXX5)j!+SAoz$yRgQ z`(Hda8Y{E<8)o}|_eM-k6rIu^3av&wV0v{Ld|D0D{(bN+BJ-?zB3omp$?s-#rP^GJ{R-bgeV}WOWi5};Xql0(q01F zavsJ?n-ec=myI81-(iuaJUUUrRPcI~ZH~$sO(}o&G|aM${_g)xuO})Tr#xL4p&#tr z>MySzJyaSSugd!B%2a%HN2Ve#Q&<;8cnI{CT71&+w&nCze$ow9E8-M6G*b}Zq|N^_ z*VPZsB! zpaGc>Q750x5VT}sDmdTt3VQ3)z9jqUh#$432%3)R0Y|L&(ziY0vDcD^olvLj(ooBO zNB+j)A_9sJYgxn$+Qc>}m^;qlrA4cLi?1pa51t&49#$;sRyF701${jnA_(lWy^}3= zgUb+f{;u*L4rG(J9D|QYq-`(}y#8SD=gvsOfmgd^ z4Sm#SfpMRr>3cl0hc5f46S&>LRBF5a}`xy?xaGWCas~hgBka?QRclOvQJuu5I|?6eU5ECrQw7 zn=a}8)6&p`*|MfA>mbTKQefWSj|yJRH|HWdmT=1Sq5X7nED;jC#7>I4T@b z21V1G?~~ZgT<9K-sj@;ehRGjn?pbwT(Vnv5*O6oW%K2S5-m2E zdcq|qQg&?6)05yYXo@LAjfOllIlh8uPK6W&3m+N&?xccSt>3%SHeYD4nlP?tnIwo+ zoA-7fyW7F!+e46C3ZMm#GKYAWvJD(2;?h211`Cv5_CNFmX3ZN5bVBCvf3PI!KW+kR3uBBw6mB9b;GA9A zd}x+UGIt6LO-78YIrg3Lx7uGVTfeCqm;LE_G1us_Uq$FWL(^80QyD|e?c|$W2;wCZ za+R^BZH3$o3n=&Q75e_pd`x&V`)n~M1p>pPUS7NCt(8=#+;uSUdZpCF@?qPTFt6JZ z9Pwz{x4A{P4jcBn$`iSc-`;#Ty~D~NE;vUA`eqSj6 zWl>{SE@R{+kh^8%x-k6Wl~cj+(p2YyvruEj3S#A=7HL1YW{Uy<7>c4iq}jMoBL1x( zWvavZ_04~M)UdT*d#~%{J+8Y&$Py(>T5AVz5jiAlw~eB$PApLtxE!Z;c871Jl!5zP0Sn=)eb-E?kfLs@&wx4DQ zcUza)GW5?5FXkoO9x~Dk|7A%nVDCt$n4Ul%Sa{{DH#N2zH)IHiQG=66m{i0)3iJ+a zZdgZ`)Gjzh(GV)hJnofs#x3himzF1Q)LF=hL+4PT$3vn9`x;`9}Ch{by*CjPqKz z)W$_#&of9H$;AMN0;ctA(wg(8f)^HFHB9>C-(H&;MGoMa|BRsJYwDZBd~&_-BGeq! zYUH|adEu+GP5DlL@Nef&PH1ACxxvNV+p+gI!&KU?(^7%0|WzJY4) z4u#n&ZA;)iULk84DD@r3osj8(jV8g+ zyNzC&cLd0Oh&O2mgY~zdwKJ-n|5bMuNQ- z99;rCExFN8@2VIC2XJ#$#k2m|= zlZ*+i$v=9}^hk^)JYgIcVz4ZRg2!Cdx-i}RHH%eyoWD@zTkDj9qFu0>_^A98tK74MEV$lK zXt7b-{5PeG)NxQ&GWZKRq8}bPQlO3-6*s*lkgYew;3NF>>hg~htM0OVXOGH)7+T zu_(DxvnfPj^NBKMS+c?p-iTfFxQy56L^3YY4JE6J{&Gmvv#v0DDA+XPM0%C}r~IB{ zouHZLL35}}qTUcyHp|s-WcXuxjgDaRmY*-#n|T>Y@_94DZW#oLCK6Y?GF|*O@o35C zr$3?agP^orvg?lj8C_DVMEc;qZACJDun+v8)Wff`k>C|hhOfO)E&^V8v2SkX0Zwnn z(NX96kgmO^$9Dy5NMZhp8c>dS$nU4$3FU@k9p~z9&UGTUd>3j%^CpeqU&uA#hLBlO!dD>;2LZ zvcWicHPH20L zio|OnUuLae0h-KexpwCEvJ|l{2V1L!)6NC-okr hVf4715$7+3R=D=G!ih%1Ejx(^qNRq@gi~KG{6Eld>vI4A diff --git a/examples/data_process/data/dog/img6.jpg b/examples/data_process/data/dog/img6.jpg deleted file mode 100644 index 97943eae928a77b72819d867802a118376d36f03..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3782 zcmZ`6XHXMrG9fhS5fggH1|%qTMwT0rC5Qq@b;h~`27mw* z6cj)Z)o(VC0t7yPol-zwLfMK{5aArw(W0G)ja27v)L0UK9BUJ8oGEBRQ`4d)ky z3A_+S-bB1S%Hg&|etX+Ot(DlwA~CHK{$Mq{?Hch@Ws7m2bY2;bpZnfsl+So5H^d;u z(c!XdaMbuqZbf4awq|C#XofC(N(=foG+Z7cQ*E7#@x;VW@gOS~UZyQ{WrHu83c6$a z`u1}Et0ttm{5U@M6tFMn{SzTODOBSdCP9Z6LN|Zky*J7k`K)I8yMFGWS|HPyT_EmH zo&kIBihLN%P#E=Ce*S>yyq+^g-LEJqNhC22L2IR8cyq(e=o3!06toH|Eia$R3nRKp zYPfVRvvPB0MYA*qIiFbh{;4Y(iS>>&E~yE7@Banf`c~m`pyF1}OFBicFrn+mRA)K53oR81!*y;+g3Z4QjbZ)pS@{?bSLx!DdX~jdgwEFm{M3VJG3CFyWE%9HFp1 z7(^*5BIF)Upi}hJbvZd(dFr2RXLQz7q9*uNM;fYClh-u!Fd;Ek5 zWXolMMqWF9Go#c~cJb?6KR&3GQX!|O;OM&S*D9L|1+qz43MTWZySh6{AzZ8YW^9uA z`z7)Smq1!c&SL{#4t7UwIjqL}7`Xg{QC*8#%|3`gp+EsYq)IfXRGMt+%uS(Ak3Sk8 zaJYaTwQO9@_LO;<7n}nP1y8F)QL!?x=E92ZgW%#Js%D0ZBoF0^MKjT#IiAvXV zMtgFW#kQx4!g2-@Wyv4$2Cnja>NqIbo6WkxK9SNfG`_vw%GGsBdvWBMhP8G4Rt4*1 zOHbT@pa(xVM{s7B#wEKEsYi-$n=T664+;D7=YK`#-+_ANkK*^XgpwsbdL93=cNo-# zIm{S8qm+^}fPK1lCkc-JX7_1K&X+H&jidDx@FMxJyf)$9b!{)Nbs3rfZ2h*9DT3Jx zmNI~Wmvu~-xyXW>vOP8|rL*m;kh<9Oi^KusRppdw|EjULQA?lUqh0f;-q*l_kYShl zKQ7dSewY2EzPMuo!!&V-UAi`7`!ZP3ncK`O0C4g`cIT>qfTG4P_U3dOCtW5gq z&GgQvjBoZhTKpB`Zio))t;NVs_-WXxZHec91OFp1{!Qu=i`Kyz>2i!|h;dd=&~wGe 
zeL^^yZl`*6eH9`x$N0)ya{Hy6UCh*r9VAV_HEpY2NrEg&KCmY|mkvsZ?pCN@$?@XB z;7$P-TW+tPm>HK7U~d$~lMH5;E2VU7pdX}*XVj_(x_GXZtJJGoLjVj7Z`*?BRWUbx zMV{a2s`M0liYqqnauv6)NWkec5*}?D1l^y|!i2qH+jAxgu5H38 z+T|@@4%(t0wU##V2JTyy`Pm1^+C5$8uy3dZ+xIBsLO1U-h+kim9Fn5gKQP;bCRw&M zd4g>5egq{17cq79dn(g5pI~{?V1?gSTgy5rbU@Xd<}behw~9o5sOLz`K@dEB z^6SS19OgTBRiPk1@)s#^S@>2_MU;A}* znUJy^B@40F4_vUe75;uw0{E?yRC}sD<|#EyvgUB7c-r^8{P+dGpxUfOkDnYPV6e}5 zmj6u)DLnhGnvfqWx8m5X`8XDu`q*t0akVtAO~iVQc_;CqbAm9p$lBF*J7xdI~BW3()4wh>a9lVIz zU3?8w>G`6ezMAB+9l4!*najf)?h*2+KH2G-b(-d}?**9_BQKIyyc9f~``;+k8rxV} zLR?;bO^o*FR|Z30|4+s3bl-W*`6SW0K`k>%Pg%55hACn_^RIKY#SPua+#KftXCGY+2;g zLZy!#24T6o@76RYI%n;>j9aP=6x(b_N)G`>ReRq0T>6` z3eXa|KL$gr3}?B%b~(v-r$5Ltd8@fUaV&}HGp>|FIO$N#^iu&tOAK?b@}%%xBJ6EN zVK3nnAVQx?b>exIn3#^V>6|S*1@Mw}>aN1gw3nk;TCsWIaT0wJxNqA<*;l>|0g`Ocne#s3u!|XbIEz-yJHu!g?Vxihs#T_q8tcj>FyV` zL@sfZJev1PrB^02U)BPVVRGosr_V!5-me=${a&vzk#~!S*|P3MyO$X)HgV6nnwm#a zA^cSYV$TUhy&_cD*^DRHBV?|zabhM0`$sm~mQu(u8txia=M*B!<&tT*-vNE?-hW- zE5U63L^Ul@*TY3_%#%Z+MclVN>tZQ3!m_-4|7X5*t4$pFyhG12^$~C1)XbL~LdyjM zhq~ZnmgcE+Ol=Bo0e84%M67ab-_3<>1^vt;BX}D3I3^EU6iA2`ZSUmwV!VzMCE!r^gS*PphP@HwDsA9Z{gJS1srj|(e#vn09!Ma0<$i{zBzjk5D1G1LMA8SYh%)+; zPocxv8+>11vuN(p9no-n6nROL43p_fX?cpK zPl6YTS=E)+u_#l5pDIt$l1g)~Rq#`)FgOR+3aOx{omM-(zlH-0d5tugC;X04;f|#b?@=F4@K0>yNq~sQ*RKv_GYuf84f24IBiOqANhtQ zwg@nb!)yK&b6Xg+EEZw<0{w*Pz?Kg-lkQv9xx3S;2jA*{S2{5_zi3otRfk_YxMX7c zpzjor9n;W0w-UrF#8WqcE=(R)Ynp=xD^YbDT6Jz(gvozGTy#chlvyO+&xm~d%qfJ} zTGi0r{4ERP?WHBz9gg1(5Vn_L>s07|<;g5%Yvh#D$lhL7M|;Wi$K8biD9{es+&pFV z;O;z}z4VLt6pSPL0u6$xjl~^VwPS#G@$PivNFfa*J gKbsXMJCBNhQVqd}U%*RZR3Gqz!Jhw6zdBgaW9@DHxd;Sr{3aSXmg^SlQUw+1XgwAu!0Lzz(^>a;f0Fc9k8%4`qk&NOJQF ziCl-n*S!*q*99qV{4%@cPC6d^d#@MdO7_+wzvVMnNZT#`XnOXXQO;F(V z3&4+<5Y_b~6tWP;-#f^elYU$)F%<0a7w`SIlb@&Qa?aL|sYDu5sz~%j91xj&?x!y% zRZoh;^`{QOOV7~6Q&2o@$u6(m@r`%q8dFUU27z&{=qK?W=VFJA3?7Y!6uL$;4Wycu zZcAJOZaqJdhczZd5*?;|QSXE;*FozSwrBU%i`m%3hyekUY$}A}`iZ_+akmS={h$O? 
zi*^XlRURJFe*tmEeh4_S5|9``(j@Zh+L_9|%iEtm zmuN_`Ljv2SdQQp(Y!lvB^~eR@DMaDU01b)Bo!Zzgj>%Y!21V-^swyryuje0Npv}FL zvR_Y-U5Bfq8o&1YqhF5qJpYn7Xw>;!&7@S+5}+if=s!QNBBr@gb^qpBwbfuTjlUX_#Z@eN1nj6@y9twQH zU#5^vk%gH93EejxWp;I+bU*s$aAVs75cL;LjGd@mqF_K9@r%a~Im65kNt>G`goKeB zuhYAu=YgtrnhGk4joDC;^I?@XU)LtYpS&@kZkuWTb~mfCgBJF_L()yc#DWII%izfC{Wgemx2K=mVgz}{?gJ$r`M1v#0T9Xsz}B}C%G zC1ysg6JD>f?S?s5!yY@w==tHt6eDK8FJl`H8F;HN7k|ZzQqDW|qwx-Ps zxAa|;@B$rGqX;V`4=6Sh#nvUDC-9ALD^H5TpLN*emIexCyiW%n%pXqBY`*};e|>nK zwge0BUUFs)>`eY$AqSjkzFR%U?-UUn5kdJckb5OV1kzk$Zhe(rwX|8$+MGX>HvO6G zO>Mo1n}FQsxs{o+h5Ehuc>$Q(V&RG2_jKWRvO)P2L`%I8Dzkw_?nF)RCUnxaC&{t@ z2N-Q|-@WOqk7>P;yb&-f;bQyzxJ-%m432!lIo!)~RwxCWA+*W+mzO;vm-II`!l!NL zZb`BuvM9@u>!`J{t~W2MTkAX(0XLF_4I|L`Eh;w7pa#W8bl=plmVK|YTBu!*=B!%9 z&y^~T2f%EzLnj+nI>Erbx?|Ck4N1Y*Klz7k6fME$Y?&D20bv&~phn9E^$n$uf1880 z^AD>XJ#vKy)^n(iQOfe23bEPf3P3Q~p3>H385aOn{Q2>B87zM_ON&dTi{I?D+;Ec^JjFI`wxl*VG9a@My0RFO&-x0+* zb@w-U;jq!)y($g?Oj{+E;~0Hqdmp_QOWkh$u5P~`F32%?Z7GeYNzJiGekroy-dEHD z4!J+y%4xhKF#V{gdS%Pit!m%&rgs;AI*q7%p``+TOW~RZtA0^J!_~?KG)7#5WtXmJ zgWDrfw%wwByx+ay!w;Q%Su>BHJqG{$y3%m{>a!$VR)9_inIV!e=VeV`>uAN0q(1A5-#FYxHS139lz-{&8a?;rVM$B{}8$S_Z$smV@$j z6|z-TSChs=zQ2+3rtjx{yi1FV;OSmSpUxdf^U-p65G5!-H|hdF5XZ*ga_wK$Yr?ft zvb7u@!@^imKG>;-aJ=s|X^*XrV*&oqI&|>CaeUbnvbw&@#nPVb)dgU@-6G5Mhru|Z zrJ3&6qtX;EgmZBF`JM9{=gF*ho!LPe1}|UL6U-_46TZXvFHeP&G(z6lb-JDG$xnm9 zuq*DV9vgi#nh<}{mrELfotx&YX;hUS%F}|`YbuA2z8bFy3#UFX&;{${#_12$l`b&R z*0igpZPPWITY?=v|;5t2@hZO(NkWtGh zp|To)_A5-maN`3vK51Dw3s)@EAM#lka0d0HH)NW#mtVIWn`9i^Mm=pvSjpXQJ8}#G z(7AuuUHZ7_BZz66QoPa=SkSi9mIjUu8xAMV5BfsF$2(5m=uJvCpJgDz=@4|r4!FXr zb9p--2Q`bf!0hEE^xNBJ5XNnyTi?Hhn;CGwAII8f#wbrd79!l{|CUHDie+svuGM zO0b85?!bW(+Ymtq#0+QX%^UPhm45XUk3SH~@zO|lCi>fPiGR@1y2Wq-pfeIICl0nSoo_aCYux`3~ zG#$2c@*Em%DdcG}dy-NHLY9!%sms5ADk`3p{wc|(PMp52QZRL{m;#ZC4@*`E-lw>ub6eh(*4`1#!?Om#liPkV}>WA9%dBwN`C zv?8xMZ2tDu4mH#@ojpbGR@F{ghsbqJNNIGvy>|SF=p%u5jmgS6%gXQgv4Hvg)2zno zmX;p2&t%!P0U{Py`*24YB1?TMq;C0I?c5isb;TWbx1v+``WnoW`=-WMW(-CsY5ww} z3oBb!{pQu3j`t`ou*@r-Gr?awLv2ty;^aG^z;Wt{HYBEUW{)Qyg^!_&jkvykpp~(iM!^-6?#X z7HXt1kD-uyjwQGY)4&|*F{wB2Dp}hg+=%|gVK>=0JSfdZrnlY1SP6Y0#Zh16Z86LV zwG*R>#2b@xGu#s#eyb@53uN)Neq|DMl&_>y#vqlEBMyEIA|)mkrgKW!_$ieQ3QW3 zZ|r>et&ZhrD0{dBhZx+yl3H91C+hW?CpP}-LXXPos?Ts2s<}`3OY!Tr3K-4d!YdAn z>J6JKci0PeGf=L+AEz|1#9LWB1(mOw*OD|oO+?qa+^JRmq8J?v(K-Q9Y}1D5_uRZq z8x4jnX}l|dbHC3^dQ58TvfU*$!~*A0w%Tsk&vPyMxS9JNyY!BSt+zzAH53+_wM}#_ z&4hfskv3KHl8wq#abn-vA*!_9jg02_U|ngd5z=<_J=JM)?m1liKr?xI@QZ+0Z-mEe zArRTTQLbktsMEP}0jL6=JaQ;d|DO>B6-U`@WecjGZlHJww&Fuv8g4n%;T1Je!NW{S zpQrHzjxs?j^nP2bZt0EW!f+dh9EGRyZ{%nM4cU0=?d{!T~=)Vp#>P=hF(9zYuH^!Hob&yC;h0oaTI&?b= z$zBz;`DqYo@8smc9h`-SR(Tw)qp{-YHc^^{Mz57NwC*o2*_98L+tx;#;S)`v>ceT{ zZ5TOuPreHP^}>g4TYu2^x*n7)K7gT9P0E+@SI)Cnzm}mN1(A+ty`kr>0oO{W$@yk- z5HIJu2st}g(p(`4E$2Kxy9mO;YIUV&IAZsV(e(lV586+WQPBQY*Lv{{d8O{S41`pxf+jr&^L3?&Wib~2VKDD3qyxy7?gMl-~mbp&h` zy~hF0R3dF{7XV~QXV#-g`FreyUx5-GwbRo;-w?Sq&!Thk0i2&a>^61(2iHPV zzQV}ix>{*hq%A1oq?kq7B4e+yAW{K-PriK<+4wt!AVRn$(`IsKOw=u_5RLh25Bi5+ z0d>W@DZF5tgw8VW7{0fHV7Y~m)N*pfplF$6;^O#3gTXW&YUxgUW?)*q6q6$46bV_5 zEVIw-wnQTJDM3E_;a1Fxx+2}#{TWaS0o*wOlM$Pu@Ok*d>HKlstHz@MN(trk@U!mW z#%a>(vpaHz9=I&LBIW7ANo6uBkQI0;~hapmmiLmJLJc45{0v8UZ zQC9J$5u4CV2sv~n$-B`8QET)8r=p4S8`Ivs{`y0@zdwEN_QbQ4F@Eki1&pZ0<7;;Z z+-QgPTJYnfK|{OS-r0R3t9@$&QXus;ow#>|y;-~FZQHbG?h;1#v4I=lz(HX5aNnyY z;l0AGAAdP+=Rdxwv4Gn&SZ&tS@;10L#bCQQ*S}G;lO`O+@!qUMjqIVyUNB)haK`-lS@KDO_!1TiC~iCrVmo~NbX<Opnz&N(fK#Kq{dn%Y zPPnWON)GllS~!HTp;EP#Pp6G?6RM8RILF3#TBZs44| zphqrlRe$oh9;=vWnX0aClJK983|2_G*$Two&T2l+iFc=lhxWk*0Fp!aWp)eq7|P`8s5o&1SDt{j|4+YY6c;o90f%Cw9RL6T diff --git 
a/examples/data_process/data/dog/img8.jpg b/examples/data_process/data/dog/img8.jpg deleted file mode 100644 index 8a40fd88b2432a07f6f7d4a958dcc0876be2257a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5610 zcma)gbx_oAxA+H^lw4NAWnqz2x?{m51OX8c>6B0ukaFo1Q97g%1e9j!UJ&W-2606? zmXPkae1E^WGw*%py)*YbGv}FSPXBT0xt_kB1E?QAQhfyA;Q;{N%>k}|0UrQTB4P-H zh86;W(9zM-Gtl1v13i?1j+TMp=0SIZ?=anCVT5up($n4*y328&ho7IH9xfs&!Ygr) zpPvWs)~#EV6qHbEYA7EkoI`+zkB?7G><0Z`;kp?BlK_Lj0SJ#3zz5@jzc)QCcVtM;L`$O&6%!<()`8zjyZWZ z&wz&y0^g+(B9Mn@2`k)YwYZ)E$U*une>~IYuUTW2ShqP+fU&Fj|fK2sLwS`jvQ=CPW@)FOk?!BgND(p&wUT zozdkumYG?+h?*s4aN?7_l|_Zi-lq1|q$#t&Ig6F7;2qKw>viAg)Xj&oUk-+eeeUyM zz6_YRa+itDD0_ODI}mOlVBYK%J2xjYcaC;2{*t6)&xsBEBTY3y9N!FE%-R2&IN(Ze z72R;G&s{1;1&PQ=k{?k&@+%sR9$l5dA#IDWdcBSn$)REfp21Xtc&CKpI9vMfDP^pz zbt?ZkN`fL?AqxAx_uZGG37SJ_ymm6!?6;{yqQ1zDMQPWFi&Ax;)`GBw%SxXFm=d^h5kNlyIT7yYvss}3S_^( zZ&;a%87qFmClzG5I=@~S;w;ug+RPw46+6~Xc@m9Z-$^Wz*VeiUtcW!z_%Dnh=bd=r zw<$MFIf-Sd-La>94dQOQx?PdOOb`)sz)tz9GwKVE-m6Yw+_ASScB4jT4PQ+)5{P13 zWXGFF%@ykU(|zW;1}L|g?Y&L5b6N3Me-1iL+1lew`&+}?wY-BFT#Ie^ABexJ-(unt zahUv~SVjXidD6T<0Z+)480OmyyH}&=j99vgIbKC)j<-oN^O0bUTl?IplIye7VCJ<4 z*MNR>*>YxEpz@ z3l{GKG6BxorATcl+|=VfZ*Bhh{1M`_6mlxCzwS$`78(+kHKi6E%06qI4l_ELJ6B2g zqh3|1(h2YMkH~Du{5U=z9qk{#Y}81P0lO?IDiSEl{Tt$rnQ52|aklz$b+^)k-7CR0 z|0j6QWVPC$K?LrwL{?Qp9u8p=7}WJ?TKF4%5&vc;`%jRjWK(>q=RM-FBw$W6>dLkw zY%uRknRHaW`)Z7(7^a7eTVRonq4o_crC&rC*138=8t;N?0E~fFL-leQ&);SW8Wb9% zkV{nPKZ8!o$Dn+f#f$s~I@qo<8(id&dwcv~dOj(x;P61NC|*$1M`eeF#;q!FvvAtW zcV8mVpDZCI(Q^Bq4RCmB=c=*QsC3f5QJ*ldhj!o?FCJX{u!nijHvB6Vt>*~p4`KWJ zx5IeH!ISO#ED+P?z-K0-d$K z1_Uz8-FyeQqC6aFq_+HQBNfV6NifyLE3~zho(~H{#SG#53kUo_=M4I|Wv`KcXvy_~ z*HuQlM|qy?9)K)_D9rD_32e)vc~_1Le%hM4qqdHIA`D*xSc+1col(py((vY zw!dQvB8Rt`|SdBE1ho%0&tpz~0}g-7vYPbp-w zv=*|20!D|;pkt*hQq6z>v}djC;GVAt^ zOcI;LxQ@NL+x6p%jPF|L3e~nKk5x?s?fux_*7De>AFdg!m2>wb_F@Qgkxd*KOiuuI zhSl#@jzVmGnQ9wpny|m*3*@w;H|Z?$NHrU$o=<`SeB@-c#hunm@9SXq`9n~maOdM5 zzZX*B`QvsSA;MfLh`@e0dW7debk=vN;9z1gtU6vZ(~0RtP&Y!kI6>Q=s&dlOrdwio zKmCVW+SY(c{J1>$Hv7LNlcFM_@`ZLkk{SL^_Cvkmrj$w~VzZ?BIgqySTc7koXV7F* zkp{{5F-tGT?vUukTWzpzL~xa41e4{ND~ZS~w7M-H%m3{cY#!530gX3oKg;>n<_6A3 z5~o%11lN>vxcYTH&F`lt1%<9gfU2$x*YPzr(q=2|iUtoEoTO;wVXiED6maA7LJGvo z;wV;gTRFJyDrvCWo~9br&nLCFB=r;gjJ(pi7o3S*LNW!`A^yE%z46?@P*!jZm(HnK2OHBzP z5(s4e;$FiT_X37bsQk5iFYJMCZlR(anZl1dzfN`xq$^H-3i#VTzr^87{Y~N!EkCcrjh>nxDaAOX~C7o6jGoWd-x+JXLA|O|rG^W$4~Ib6%;jEVmVq{U}wC z7aCew-?26Lrv;WYjCqVzCq9zp*e{)*g%-=GnM`&It9gcut7kttYDzhST3`jme=C$q3psKl?5!k}*a>eztVG`kb=9fRd}Wm9H$ zE;rTO6{(4QeS1xZlKr_8=Um{BQg^xJ`&D`-)pY&6XQ2#E7)x@eftKAXdd1C2j*e>p zH%{2D0vsDi+4Y&%ZP=yTA>p@;<{p+-l%keX=`0TmOjDwG2R_y%MBhn){I;nNXjy^N z1sba_E+~Hgl2I};V%@XTk&$XxC!Ox^W@i08T|WoWSc87C@HHt85!hG6*8sN;G{)Oz zOFSA|s=V;yB8n8--Fg?=k^{(}@4uRFf*0lSr+sn`c(V2y$G*`rBY#Dw5l$ zhH;cP)vyuGLr-*$u0=HRc5HhEOI1r?%AQ;$84u+7Q|L_-l)Y!SA8H$=V@nX*rkbar zoXpVVs5Z+oK2@nw5kEaJ@yGS~Ki05$f5Mq1m(EWk1C%k_=7qQJ)RPC>-MBE(ROPY% z2TgdQ!UUU!Yp)ezZgo-bn24qow>rGaCaeuy_IGQpU0UjGnccc)fCG8yb9G zojgR7Rhb?51&GUBZVLTf`I6<1`ko3(?NR4!yO%+0(AG7uP~cAsV>>bYRzSUfS^0H? z?9*h4iP{%ypBJYEo9k(qV5xRYf2^<8Ap7Mx-}4ZarCPzks$|6<7c~3=BQI^j6He7< zigZ<^NGfR9g&X@+7(8tabH{ncGnhB@js!I@YFp|Kq^JXwR|!aLQnH zS3?1c1oYcpjD@tqN}m}Oi3}2sNw_yPn2SBSt0Q=Lzi#+Bd(4W|VC?O12IXbXc`*Ga zDdS$6T{jJl*ps4`Vy2%2Yp?t{47TX*FG;n3no|n;V8H0{DaTgVPWp(;2Fh@tD|60l zwhEUMSn*LshBr97`Z_mi$|#Pb&0w$vqsjL47b@4l67+Vdxotq0`TS?jU$e^H8OwU* zwAt;`@1)s;Vx)dCPgV&6b8%8clq)=f8-j? 
zkvt$tNc*2t3$xh?L0UYeRXTz-ne8*;lrXgl6D;;=*m1P2DCmky(e(qL zo3qJAkL`F(q5W2Db$N^snKxuWoQ7^BRn(p|7y?W5guH;K2oRg<={IuNEksbFA<*&j zK_0G4Sr1W)zhi6VV?GA9*>8^|_i9g|&)<5rXxWU3R1CA!dLCdeA9!i3z=`A|iJLIm z$*?*sq%m&D!0yrRfY&vU2l~*y3mNaYbl169`1A46e6vk#ZQ03t{wZ+D)h@1KL2#55 zN_rssQW2RI5cN!MRhqw2t6T6cY}iR0##j7fv!Nou?r**}FRw4@iE3&}vV~GB(-R|pGB?EZ zA0_|iVtx#6QMrtC&BJD0$0h@B+#Ojdrbcgb2Ltnhioe}T*AxD=`N@}tw*}XJY*lz) z`qP&3Gvyw4Z$n~<1F97Bxe$rYcwSyb66&$TmvMzon~84oM&`nH8NQo2Sh@r>jkqp! z1QpXl!bNb1D|Np=99C;k2bQY$rpD7dcZ*h(-K)Fz7F0FE z6=iU zI|MTj_=E(D=|O@4&%#0*qf6BTS-;1!h*pD+;=gRc)93f1;wMM$X2)1vgd8N8_B%a# z_4nIOMoL2^RR>!#aVtrNYgTk`iJ}oX#XzQv>_wr7mvg~P$O>xz=&oPICt2P5$#d$6 zZ`rmFzD9p3mGYziu_@!%y1mhLXQ;TJPG7VoGMicBHRkLZn4vSd1}ZO}(5{C^_ahuv zkYPV#=YsK=nV<1ymnEyf2n>3d2~s^D7cRdvL?Pb7DnfHYvrmI|t3Ho%!l+a}KYwbD zm1HDRBtdKqIOEdd>>rIg1ik#=!gc(NDRGO*2Yg#d>;WdyeyTocW@Op7MsduQS z%Wtvh{SQ7P+O~Hn6&5V@3)-^L5?$t)e{HOUCkmH}3>oTn$Y5FDA+D_O|3=AK>g`q zT5uVwouS-SA9(_Us_DRSTiU!bgPsz`Y;BP*KhF+!f%oJl%4uEvN*?|2cH#iy^(|yLB$)E5 zk58zRBz&K{f8$`$@G`3`qUq=zS6X1`{)9>d1J_8S0_ju39Oe0F1`Ct~D;32`>kEqG zG9Gc`nH1e$?Ojy;WY(S6K(cbc(T={gTcMfEQ7!AIsQ`|~Iovnn_?@V`*^o8b&Jx?M zpEDr;ErcWrOsFRIBIp*;_{6!Fn zi5{GteUv+1hVa|T(XUT+u;$%^n%Fd&C36nM=euK#=< z!By|dsHv0}o_c6_D%s@hK8oPfr=iK8>SJ-r$K#uxtvHens9IN<0F$T<*7$2sxLAaFy6c3#3H8 zwwdKEoG@xO_i`xrQ|8=Vd_^fz`m|0%Z-#Y>wbv-Ve|C0br>E?E-_RvA);E!?&=?Uj zAI*+){d4k6$DD)+$4Ik49zYN3QJuq4OLx97xY}swUDcEdA}qD38i?vTLdF&*3$)co z5{@saxM*M%NSWn$+*o!3WX4N0qn%~gcS&l6&WnA--6viyCzULe4Gv6_=ZOw62^x`i z^C7DC9UtcD8h0Eqm)=3yU`QLT&Pzr|KNjj?&Ld9$==SZI^^aLBqNK3{m+=E$yi0lI zr&dde9apFKxE*8Vt8B~Os-;fYcci3sa%;K9E!dA(w@ZDi1$AxmL7nEM?WNJ^o{i~t z((;5#^-*!HukJ;Bi{Pany6&V*o>x0rqLY^XposjjSH>|T27<|xjWf*3Yx(>oTL$J) zZp;>%FT_A4yG(>FIJ#M8HOD7k8s2J5W}DfpA?r)kj0OMazffRb3Pn^HYw0T+dV2gh zjjv!hhj4hB72-UqvM?h@nlT2M%x~op%p$Ok4l3Fsm!wuNwP9z(TXr2yB-gl=rg9QL zb21kCwd+IC%!T1qwV(gZu3DKeSp+*4x8(I=_N%D+{5eI@{d+cOc!+IuqfnZq4So@3 zWhYAJ>5-vZGT3WD_>nHa+e`qx;wPTB_2(ku8hD%tq2ms!!gnnDBqL?ZTZ|!Z5GSF3 z#_*;wjzH)g#haRk3g4rH7q_%}`u5H|UYkvA3UuqENsN+-%DXiXUGJ*n%buhqzK%KPS5DAXe2 Wt>0)|FxIDV5K^kc%1U+p^S=O`9eN@F diff --git a/examples/data_process/data/dog/img9.jpg b/examples/data_process/data/dog/img9.jpg deleted file mode 100644 index aa0f6815ce67194e88a4a3c91a3a557f3fcbaae4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3492 zcmZ{i2{;?r+Q$=$HZ4UP1Z~lVklMvs%$U+iG*nVc?XA5^Y9Cd!(<&upMBxbM^=J#{j}8Go%>+2m}Ct z2L-S{1b7TM#xEc&Dtbyp^pqI*&<|g>BlQ;vDfk;B7z+hPoS*Rjh zMMXvYoTjd(vJOmH1r7v(Kth5-lET80%4g1;QC5a49~k_wzz^%chB|+E{{lJ(K<9uT z*8SfAB1ZvbKq+3J9Dqjz$SVTeZvsdk9FT_>czD)-8y`RK5dj|H(F1x(7{CMMIdX)T zkDrf6;Ex4B9$t|ne6s5NqTgHH3`iC@CHF8}0~1(UKYre%b7fVNASV6;_Lf`D!EVSw z)PvCfj(gbi@E!s39Uxhe19SinbN|VKJR-cZ>Y_)!KP4AnwLb_raY#jYMF7TtC7D0h zsP`9TLoDoJ`9mwHh1G}6W^`TaXIoxiXoNG%1!J6WJ8AQJajTx&9?igAmPG_g=i?zv4PILU_AaeVEjf8B$X#5cVxpjaR)B3^Sg zMlYN6eH4xBoxtmsAGaZ0RhEp$5~CD+Zd_Pee^d0*<5$}yiqT@7DQ^C|o##OqzmoWm zGa;p}?FzQT5ycKrRv7hZ)&{$`2S3+om^F>61!XZv#^ddEjs7ls_7|y&8ysmU=Yb(b{}?N zT>ij|Zw|VSORHKZ4Q;)NdYOqu8)9vIb9HfN?}mzH$@`vEKz}XJ$Jit?M>4a%G-3(2 zN*_)J;@x}6#qDp7%QSKwUp=NkeQWxYv58hN*x)r{L;)Ngw!jtas8TDisL<7k#9~$~ z>u|GH_blga`q&E>t#vK(0@MTVuWj8~W)cz@J*+3~`8ng-y_~G7eSkOfW`G(k7wVbC zurD!4-{eAPB#OUnODt=rGCe%ZKZ7(V7Al0zo|FV9J^Rw`w+@#QHBW+CZ`3H9tAaAW z4)r`{-Mt&55E8bVBgKAV!0B(g=}zz1nkeCv`F8FKMn5};c;_79SJCwwKk0vK2BfBW z5YF`gRBpUY-CZ#ogjlQDrfQO^KuURjjI^GJ9}m$(BZuz9VUy z&(*HwO?j^MjY#gr2t53q7SPgAl$R~eg6R2rK=W%5rSPWB`S=)9=mx2HWZTdJ8`~U+ z+F+-@-F_0Wz3Z8__Sq4>s85kFBlg!mwkG(|l3fy6i0N7{^YITF4Q}TyTo}jn2hZfu zIfi-F2^D;CfZ+fASx(pyhduljI;ijXwDU{tDPAS>uB`H3J0IE9CblXJv?nyO7jq+m 
z)n{mAT|KnV?t)v~;{A^nw$G=twpVM`aZb5o*7G+x+a8HnF*XYB`we|tdeV#RUHykI9EPC!ya?{ zhxHLGR+p;M-_C58I~&**-NRq4eqx8MH2nVQ<(>Dqi>laRwe2&KYD^6p9pZL$q+?ZA zCDs5^;Q7q|+3?xm350$SNsmqu`@Cx|@`k-0iqsDhl@t4bQ21w; zcw^RdVn(XxYjxq7Tnys;J|LktoZh|SHB((4>)zk2dJ?=N+M|&^5Vq{yo9WJMZqtYo z8sfGL8xWRVXR1|O2!YR}Jt};Z+D)V4eW~gCAo+>e|0m_q;l4)ogx{a+!M)Wy@=Q(QX@%br(eenCX4Z(%jp!nB>2xXRI=v)TD;I0SKkxfB{X?TW* zpfm#r?DW#Df0(3p^gmr96AwdY5658$*lgU~ErOY?>KT&bn_2>TK z+B0vaT3GsNCHyu8jN^gB-oy2pUpkPe#j)SnN!5GXhTr({`<>UMQFQgy9+*=6(w;%u z$T1Ti?a^2hd(HgRSEX5fF7}0_KqUlD!U)OTa(U!rp2RbafTHZ~^?xHuCRVkbYg_K8 zts&HibI3(cawn9eMdK8r`fyB6BwE@Qd#^}_*W|^2>$v&x#ZmkyKRqN{XnfvYJ)rtR zHK;>wsUQ21o6a3_+vu-1I5)w$_ye+G&#Bd}Z3S8UBnut82OW7+rLZM_YXsyMBIFzP z3(YfAC$>b4YH8bhA?=h3pnhBu1U}&voK5s#uprmb?n?K?!ae;LkqGx$*B2uF@EW? z%MCH38pDZYv%RcAI|jJOUlB*>-L@E?4}x!}p?WWlH`q>vMwHB=J6pO^yPPer%(94+ z^lGmPdHRcRj8o@q>3^#x5T9^m(k9bLXZi-UlvicivhCTs#AE9WLd#;^X;kvoC1w7d zvhV8RHGU~1gmwXr3m*n>Es(lndTSo>DO26@q&V|DA{1(!3T>`gSL<#;*V-q~*mebs z&WN_UC?yP;=+`>5EthlKaOlK7%;+lB;be4qUGkIEv$n67r;*mrCZcLOO+N-nHwf-9 z=~t+Y0*pwIal(Bk?ptOf-$an7F2ZdTG<|y2&f6NP=#ZMTQ*Qp*r6dK)k=Rg9YO%dE zNS}bQeS*Y6(=}Px~fOzBfFLCM4 z2qQYu`I^!BxeDTcDkAXiiYEl`b+rRu6&?Fi*B)%a&Dp)q~lhDtr0@#0;f>#hV z=kJkx{X^M;`-}pp^jaVqZk~355Wt*^*v=%J>h5r@{*fE1-frvN95$Rtxgtyt0sB8~ zUpF%~DP(Auw2hi%m@fAJ@fgXB4+QiA_$yrA4bGP&$?I1xh5Sf*;00@e2b}^8TyX%61io+!fgs zMJ=PJOb1@ihTy>MGi?jcvk_04e;03wr}k|c$w%ss{q-WetSi};pCxtgbz#CV2|GSR z_3-unY+T{-{3;TS3pM-pDATVDlVa5g&)c3g>(qn@_1%K<#XR<&t)weE5Hs$D)1M-3 zuv6w9rKS4-A!WTO#i1%jr`-^hI8d0BFdd4mlYjTak`wY&aNm<+altl!xpmh+vID2D zU0nk-&Axb<3*&8aO>zaO_vJ$1Nck$Q^zAf&u=QqW%vM1c<=7g5dZa&+{s~{uK=_te zbX&J|*?rJwQ>X4J?o}zI$g;SpL*^A*Bi_okyJm@tl-52)MD{&YMx}@r z6p_Y6NgE;**|PJV*RAjK{r%p*-p||fd8TRZanCi^bzH}BoX2^Ye>eXH6QdcM8Dj(< zjEBh+gkke@n3%oqp5*HGVp&40j*FhL=q#X^gh;D5wK#s2$8LPA1PQW74; z#3Uq0BuUBT%a_9o@RyX7l&q|bjH2RdRaMRT=Y(_&Bk&RMKiHo?1Rf$UAHRU$BB8|u zjJR+O9v80ukBed$4}nPJ;U)6%@$$lL0^u6QE6yjOps|gAsm%d_RX(H*!BJ^~ireoz zk+gmDYqh4!@pFrWmMtesNh_^ct4vYR($?9yNmtM4Z>q70sTs}A-oepn=PtVI!9#9` z-5DOfC;U#HI(^3f{Dq4lmo8snMPG}Fjk_M7keHs4nU$TBo0osLgk5^?{)4iIPoF)n zuBmgs3zD`+rQLb5NKw=Fm!!+_H$ux+YkgH3LZik9{r@)RbN^q1 z{&&p(4tc%`6Bi_WA=(oOt1uo3j3|N4w_(CW0>&dw6vqs(cQ%gp_8uxe`&U1I{rden zx^jA?APZ8SQ~Ssj0qun~j8jd`xV0^XHPB(P)p7^xU9s<|amP5e${B)8H1^II9 z7+j{IUR-osq@YnAd@W3nPN(_Fi}C42$1w?fY9wn6eSZQL6Q7mNBri!%HbcXuxWmv& zR|v)3urS)L5@p7UXIVH1Hbvjgqol{(T0&k-RU(DR3(;v57y}xe8VQXFrq2*tY;H!y zB8aIa>z`4;`y>*wlN- znAUjgDL$^U6O&-k zX_zxCSIlz_K9UVA2tK1WR3(fk1Rx;b!-uCd^b{tRhulY@xWfd}z4Rgl(U>zeg#Gkx zn42ylaQnRUGF=Ea5HhTOMOIH6=BL=mp3qqA$%9<5N;o;=zPyeoE?uD>xQ- zGqcj;u^RL>Fv0~}mlQKYVXBI9W+C?OfVpKlQ>@m;GAosrhtte(#|R7tn;oKX6{b<) zvVe`5xhaMZg9k34k)btb8mf^NrrO+;S02qHMsWvp3Q^oMc`%qvj30exOpr!#PvuvD zZpLjGU3e%_Io(_wKKVL+)<=8gyU})9V5j+h6Z$#vr{a!s$5p!ul->K1i~sB4X}x!xxMTn1kat-@u>sI z!rnlnwD16fLE8q9g)K|A%&`U>aNA%Di{P6(Dx3tUjFceeF(_1c5e*&9HDDy_D$I2* zk0G57Q^{NyJ<0aXADY7W0gmjdv?Z_}?5b1=77f0{VUDd0J@^6I-opH767h{d!a`sT z%}o*XVG?1z02XBQnpGw*6UQUwr>2vP)(}1zW76qvhPUb~961dew**HV+Our!Nas+d zr_k}-9~X|&Li^2(RekAk<+J6xrSAzFecu^m@ium7j~YjKcD3AFPsO)W5#yD_q$z_R zC+FJst0q@V_DD=54n+)P1RMUWKRhPK$-94LOssvF4NW8$bQtY8zMnskPefiiQu*UD@m{A#%Z+wA;4jziXFVfvXIq6P zOqLutS*h6h+H-kaT{ycr#(&(nki-n>l3~$Ds)mf4w^2#f3b2zHh@#Qr5ZGuI@tw4= zZ5GxYL!85MAtSn?05}m{krdb=pbWzNz{;@4Vq(}aRR9%`3_=*%iDE!^i05&NS3-Ll zSn7X00Kb3$p=m`szmHcQ2LYzQ?Q_quB?$VdDFIIjz*53S0{;1TSD6Ct8C=+DY9ycw zQViQ#9`FR)7)D}^5n+?HIjfTy>`n_UeDx`poey!E-3yuRE^mOqF+G--n!;rlpd^N{ z5v({!rvRrJwkFe0F9F}reTUE|!;V3EL-(>PuL!A8TwOTVH;*lqGgMa5@Qr5mDbh}S z+*Kp~@zL@t*uf;io|cT_-E9=1Cm-(SNlo49Ql7_ZD+-w0()t@Miaw^(DoLWX)1T?x 
zE1$_FR|;{$*4pkfsh}n1kfQFb5LFp5Q`0N?@oQ9Zdt#_l!9c^KS?`YTGs7}+UmO(# zxQDxEEL{V$?p%u)nRsh(tyG}CQ*A;h{{+|dptpmxdWIyqcw$ZU`Sl~?PQPkAsEb

wsSL?zMSgs-JnWRUw7uBXkE~{d5mtORdsN5Jpb$3{u;`@ed@GX zKI&ESvbey{PfdDQCx2DN{+tVFu})Y%vE!@d(vd1>{n+HIOPz=r_un2ZG*-N6FPqSA zpcqB}HIZWz#;vPg6<&Sv+L&qtSGvm~CcJRcB}Q*by7Q)N(x6|lZ(c@YL2!oSU|hqP zNoIgr%9UH*f@X_?BMwhjo?e|LUU z>{`#gTN6ed^%sj@$h(xB_%J{#@Of(1L;b63Wn)S^#QMi={uUQe_l?{9kWeG)J?q~x zklbNjdsAb*l4GAb@$7kl4@}Q*M*_ywN4v7JD_%F=dVRYvI7|6uX~CmPS8g8VatGCM zr6BKFZKZ!E+Dh&WRcjyiehNIsjKnn-*pW_iL9(3PjBpN=h~Yax81anqA9*1QBhcgj z(1{NLbHo8HT2Losl#o&};Jxr1Sc(dSN(4m%Vf^F50G5At0&re1!k%3&!X7x9)h^9> z6z&Dn#fbAjB>Rxdxd<9U-8tRfgb_RK2Yj8LKI;q8%U-tSRD)@efDjw2 zin{H;?C2{sqY|yn8Uq5Qf;o2#k}+&;+_O~vtgtRCouM&FAKuGb^F%sl40?t$u?+

(aC$DNhh-};q-~MwbFC$zozS)y1n8arWG!2Os=H#){Yd`rh`6x#UUb=E`?&eu}-H ze-&HYH<0J;*%GeNnm9RQPjYLxld@EQ?Mln-w;ok;zrVUZDEFci5OgN>bn1dM0|mw` zh^{k|X%OHAd~Qc56yN^Gk0azdTZaItS@Q`ZO#g9`WE_s5{wRB|$| zi!aID&@jotOz$oZmLN8Nla$N~Evaev=KbYMQhvlxwwCZ-hhH1ctc*FedaBiVSzkx{ zI*htZ^!53;syUf2UxETKTmEH_RMuj*zvTPr_N*+(e$0+(Q+?qe!GBd*V08)hC_Lz) z9wB(*Z2(!It!dTZ6N6ux&0_|sMHzeduX>rjrR3#`J93|RWo9+2a&IO*&a6M3hoAt?8eHuEfX`U z8O6(&|MgF;*q}|y;`qj~MBO{l-No`?+KP^C3L(XS_Ai)69^jR1Ko&6JLdA%~^#-aa zad!bIP{IvIbo_$RR9G+H(41b`JX)o%fbBqKu z3AGy7N`Nc-t7Q&2-et*9v+y$6ecQ|pFCbg#4wrB`1&?fnfV%LK4R~jq!4Y3`ticC| zq*wwz!{Zs<@r+<2DJsd1OmcZcRdVOABH2KB3_p(>3*^)PI7zE)oWcL#lUoNw?%Iq0 zB`VaS*4Fa-bVPyV)Y7Oar_Cj420e)b|Imy)>;ib@CcYQx*4C-^Q8qvQ>*3$lb0mYO z5fy3c{q4VfQXN)YJU3`QBY0s+c2Y%yKw?mF#fe}Jv1-#4m+3u2$so(oa>;dF zb$ENC@fzOFyuvl)%@w!#98AZ5zb+ea$E5bDUg@e!|G9Oe?6cOPyvwJb8Pr#O3-fp; zR)f`!6>2qK_;70d4>6w)!@ZMD3ew66bcf*NSIwd4!&uKkMY?3)ezMAPJ|em8{-A5> zoC8Vc=KfdD39T*p`%4-XuNVCE1=@hTf(%gK_J|1P!kC zee}3__d`#Z=;q{80ZTqqMh8!{?(F|Y2<4-8HaQurEN&b4da_H(O!4t^;_B&xDzlb{ z%P+8x)S0)t-KDSgyQe|+ij*sVA65A&GEVVVqk&KbRdS?sWzX8~r}^w}eP^9^jX2zx z8`)U%?0rXp3A+oxx+}PbfXW)sYEq0aKhCgtnW*u)^6l_45 za{;%zyh!VrV%1scKzjtJ@*oQNQ^MhK0E`5RG|McoDrsR_&#bbg(_%tGI5t#FBRxuxLwF;Yd;o$;!p&2pRvCyVzD zDHTmDv)*7*SKD-Q*7N1JJvyGf>I0)+zlW-Ok6w=&-(>R7*0rZ0|NdpVIOg`7ehAXZ)SdYHcQ7ZLhH$RntBZ{Tb*$x;4H6|UI; zR0142Rj{|yMSg@5B-017-% zUm-swaH-ol;8KC``n)y)n!Z|PlE5P_V3r9xKWIhli$_R^DHWJ}I?0x){7|{m zy)3~?3s>c87*PRF6$Pa(%UH@_M%lp3c+Li%7pg~j z3^EsRAQ$wLEpi}?Lt&Qf24km(mtNnGGZ>^0`}Zw98KO&;O^x*YE;KAE)^=w3U1#Tw zja&7P&OP>gTUBqSD7>`P_)*69yH^;t`i!J9i^6?2bMA8{%W9)68%*oZTT};BMdTB{ za6dfA)$EVxCXY#CPRhv)Dm6-H>ZT);| z)!oSVKMu@e7p&yIxam~NzhCmO`oSYYw|eij8O6zKdxgD9?M14D>MEb=9w$CLXY5bh z(Nq*RRvz9d9RA|$`HI8qnQzBd^ywaTiOUew^}FTXx@pa$#h0JF=-+jfF=8iPuauWz zTrQ-8d3vwulw@5xD(S7u&lm_{?Iwktw0;|=ox3*MbFau3mB8SOF83F?Kdn{__553< z|F(`#a4)ND8BgA#+jEsy3iA{Ml3dab)~LD-rp27v5N2?;$KJ+;a+#=|PF8<$Yn5Sv z`X20^c8_Zwy?I~gXo1nq8zwbA%a(q5P7OL9lrbw>P~EgMQIcU$y8P(Alq&^PCCL$@ zM`LXlA8q{xAAha3Ao+9_4WK}>wbo$T+Q8~RM^NKc&qU$gf)N!0Q1Yuw0r)J8plU*- ziY_cOk!Re`(qE z64oJZn_7S*9)Ci{fZ>ovP(mh`3N@J*%4TcqEcI?!xRTo#S=rLgw$ftU6P*_MQl{=S znwM1SA>kM4EET3R`hkU_?N6kK=v0swzZ(w?)M`h7=dfaqnx^Cip+50%}mSrR2 zyUtE6xn)pZ*|FJ4ROqY6Z24<1dA}K|kyP)2oA-2kA3G+r(LX&rUD1-@M1K+TH9yMEozk#D?Ge5yqzaO2;4R?CDNmF{a<=G+pq$ zWaArZZ#ArPja+XZLugQ^2);e%Vt@F2xF=PvE+_-d4rW(&Jv4z?5m4P zw%mwz?h{q4dpf(pXC6y){LyH8L9(mx8vGt`W%&0Q0&lANV^b{ zlc60!1R?~FNw#A!vg4OvAsIQ+2eLOrHbH&gX_M$mSt7`Y$&P1)-btfS*w*S&2{0UW zOh_5!tSodt*jnzG*uStxu_~krh{F%*XC7oia|MzFNW)Nb#if7Q6<<%qsFASY`x8$r}t4nxr|JsRNvKe=Z zl8>%^K;5M-U@P#CKX(UtC7+&l^=|HYceBDpT2sNce)(UxOfK_@MDs-xTPR}_L{zCe zB)54~tz|)D>9O=VUyIc$Ze_jNy34i~9HrErBnwQn9LX(m^IR$T_Qwg?eU|Rw`~urJ z4<-hRi}t(J^bcS{TkG8(a`_y#>B?g}iYv~%tNAH0)TkP#8u)&AkMTDTx%xAKQ;RPb z7>(OIJ-?Aue$CP7dwO(vs=gKTm#Ra9Ts{hDfl3AajWTl*nZ%AbJFB z2rsoSEM!2aD+Qf0)bhjdu#>nfIvNmNoo(BlU|8jDWOL1GO@_9it+Yfu!$Z@aFLnD; zi!80S)OGI4*>5a(o@w^NhYFH$LZvHbWdfi{kjq0OAs7f3c#;Q-D9Fpns10YztAI*- zXv0C7WlJE)<6DX> zPI!q+;idP`o? 
zqSo^b<|^-HWG8aGt4&i~IoI4Q*O;#p-{e&la58^fP=9P!n8%ZQBZNl7tmSS-O>O!E z*~cdiwjQ38nmX;f#&9$L@qmNBC7bNFU&NHFR`RKxnRrRDcl;i{X5ho&Cp}{a>{bkD zO*&s2`*C{dwV2FLh4SZAj)j)g zkOdSFrnS%k!95u$F6y!&GD$~(sO8wA_7{U~#|U*@cEilFfk=Ly+pvjyCYxxqUHT9u zJ24H6SsRRwB21%D0J&LIk}Xjk4aaan_CgR~FDxi9-d)1mHwu?s^SG!XRf{M0rb4Ke zP`J!*I=%dJsmoE7+GBM$Q$x>f6)b2KHg^tMVifuLv{PyD#k{gpK~b&Sx3E*oBeLth z)|NH8xnHrj|0*JKF-(2xoLbI_Gwki98KnMXy#Ohl$5K1m zBLiFHnx@WMQD4++Zztlu0b=U?;0;UwUwwbc`c3t4_$jU3)+_v2hF;3*}+bb*O=ykg_Q^e^i>nR^AtJ*TTjE3J=XCe|` zl}LNUZY9qlHHy3M?hw7bM$2^q_DN+?6_(9u4hLL_JlQ zktJ+v3?~|4XS_JzQzBUt0XNG3>AoTMfjy3&6QD1&s~4O;N>DHiwLeg&5!M^^HaQ$} zh<>sqsZ??G!5GvakwAGdT4%h&tHGd9G!3g<^C;|uRA=iS&C1!`!OW^=cc?@|>7ktF zB^qH!o!D*8*3aQ)4g@1;%qf$F0ao^e%YUE^fWj^Uv;ko#b36yND(2FB5q12*zClH? zFcKa^KedfnhDCRES_U~`uXpg#hJ8J~#5hNOA8*^j@sobzUP0TaDcJQwO7`Pf_9#XE z#XPolN;wx|Jbw``-5n?>_&Hty9vY-?Q_cZ2fU0Al^^OI5A+jAX#9W zKz&PTtme)*r^TB-jPDe&ACuc*vv&*SO#s1ypICk&>Yp-_o^0woX5c!Yn#?`nSTg%# z*Xf|4@3S|!{u_Jtrpx!z&6)+Gng!A%V@*HqY3fU}i`!Ftcrqy`Dz|#q;t40a-0zQk zwe!FEmmA%G7!Yvs=blCaZ_rfrvFqA(uRUG$G=E2p?7kWr?r}AB4O?=%{5~rIyVjw6 z(VJt+rpJx^!%SntXf2D@2oxG;MtHQ|-&~fp_TXn%y#a2f?^};#-G zreZN|eme0a4dn(UQjNNb4OKFp3>766BrT3b!Qd%`DZJ1cM~(i4#&j|ydkFauc<F`s@1X`?r6TFCJO3dOF~sz3P@j=P_fI#@h)PdcH%_-;-$u))%;Grx~@X_ zS~b(ltnsMy zd^W**H@o+#MH&bBXRHg*9oSk?aKqU?avSflZGEbB)%F#s6+2yehhHZgna4E0X}&U# zc`I0LV`gSp#Tz#@hxAa_1_Ok2~i4ly7Z6QIaCL&4of+d1_w{ zZIiL>Er(!U8LoEu+S+`l z-|d8hUVA;Gf20o%hd5!fhKRBNSYVSDpaN%@AnC6<00=^OLjeRj8HOJiawecR1JQuy z@l`3(g)uBF2K#ZRs~4tbjYH!C^B9i;0Ab(BZ;S zp49+Y0bL(}lN^$f@DMxq6W%vza9 z^I8L~-IELoR3iLpN&tCRAtN&Yto5zm|h)rL-5-m7;;b^W$8&)Ro} zB|3i4s_}8kzAsa)MMmpRX{@`GT=z_V9@~*SxW}y^dtkOtB!Cq`EB9RUu50qh8M%)J zpX6FDtTe9sxbjK1Th3ReVp^|tl)a)n$uGaPmBEu9+rD#j<@PD=s}AjT{zBd52?K{1 zd&E=r@q}0T&$>Gkjgv;QD$)XJ$so=yL@K5ZdCBVbVEk$gJ7cm3(w zyEBb?gNe63-_ZBe$<4AAn{zldvZdX=tbiNz2~}h+0?K@9I{$8G$Qy97qG9MzlC_2~ z9To=zZnA~*dcaT%C{rhgS>$MMSl~QiOgS?h!NZIS0q6os*>C_2KdJcVke@Ieollcr zX^3nafY`>=8Q2j5u4Qoc1&9e9dU>wFwR1;D0M#~t&Or`f+YPUcx+NrPnz|^Tb#Ktr zLSt~dVT_*>pnz9Hg9DMJ(P+-N^z#ckyQkj(qX0I*6)GDTZXFv;;VY42h$``$1C%9* zxiDhHtZ3`t{W8-Awp(1D#rq>7pBszByQBBMe|tVc`_Q+O3@e$D&;WLL`(v4R7a(TP z`FpUWr$3@%F!4cQl5bRK_ucMIPp5`GcA7b~1q@ft>==_AP&(uikz*da`l0)-+bbjw zSb1thkOv*cUtG#dJsA4=eXo`7E%pPS^IXSs7G$@m?L2iS8}?s0u5!!#5qVU*Sa|oH zxu(uvdCL5|FELfOdBk{}-GAI^NYdx*cl|L%?5{CWQ-Iy29ZxgJ+8AA8#VQ`vn>VY; z%VdIRy>}9CE$8I~h^D2t8+%=Ra$c;yd16Nn9Im-9s-5WdHCV=y(QdD8N? 
z@uo1!j;6Y!(Gu4WzHbOS`OIM+Thu6Fwt8$!B8{fnv-{Bz(W~A+9}jduozF57-wunc zaG5Xt2ENpA?hTa-s0hU=X`%81K?RjFqXN4DgfOCbUQBndMK-44p8W>NAL1dYEO-uP z9*!JBbq|<9r>0=-k>{d=^eu4GD8x{3f{LmWjlhH#lt-DSc!p=9ksz&y zl13=FN^qVecAKW5un?DB=XX#OqF%rm4*`Y(iIo~U8h}g!C@~`45F*d9MWeSyd)9d{ z+J)DioZFrj^CP@s5ARuuzhaJnEZu2fAfRkd(3QXbK6&WZx73soow^4b>U`#W%v2?L zCK+@|r`>0>|Vv#yUghv90~vsgAUo9qlUjbXR+i3lzh` z-;=xx1*!q$-B0=ckqLVydCXXG$?I`bgV(E)w%J#GiMm*8W7i=2qG`NFv_gX^v3rk% zsm;0b2Em1!idOGAaOZq2F@jUpFT2mEpYGw$xn1SfR2F$c@%)2hDo;P^>-^kTHshN@ ze_>p3nH#c9>XGD&1Ls5nD_;~m-uY7Xhv!no+(L5JbononE-EzyqrWbsNKy#Zj!POv z>5*g!m+K_E{FBaALd!i~6F$5(_30nJY0{y&r(t@Lb69JnT=8^$?Qqd6R+qQpD%A(` zSkH~Y{o~tS&z-9&8`9&Xq-9b4kMFbSs44iGKXo?yZL7BQ)#+P3c@*X11gT#cfHWHJ z&|vbb0f8t8fZzZCVunD8`!XskQ;uRs_kQGtMW^QB4wwKnf?tK}8e5zW>MkdEt6n2bASUYJZc60Z28q+=7`e z-Lq{H0l#k&50OF?A{t2OErFyuiZE3)D|^8Yq7S2^^b}`d;71)m-Jcu{S8{@5vQ2mL zF2ZH_5xYELLw(pEc#sN6hePT%rYL$dwIb-pkYd}mQ&idIZ_aU!R{Fd*+&?ty@dM1R z%5=-hM*|O{X03uYc&1%gv|51kIGRZR#9nDUfEe}_S_*Har zotaK&`w(3(>5;X3U#(bjD-eHDy(`aI;p(2Bg9C#uALoirXxwj&N_o*r-8pqD;OF=$qlalfL=Vu% z_tu1~oTh(ry7DlumV46Ra{gF_f2DgV>&Nl>>Ab+9Hw8S8J$$~!Tz6{>wfAy(Je49~ zyX9I;fKK?2rzvYz?f=9D{ZWlPoCJbq22cR_f!q2YjECTcz|{u4$Bl{kW8YlRYQoXp zO|xyRn`e7xR8(+y*as00~ebkgyTF8Mv*Nk$^;Y)J~I16gL7YjbR$eQ8#K?tAh!w+XE#o z{G|*D3xy5HM7Ki+yFpv|F|;SZN7B7W!dRYFZtvU6uZ+#iEwb{Y6c^4tg`z`Hkig(o z`C^ClDF;OY?>H7H+WkYl-%^@cY^&NJ{ner0r2E8y&IWqo-0;S#f)0gmrzXg8DQ7B0 z`}ZEY-BGp~j_c8Xs6GA}D8w7Mlk7BjBXaY4%AVL2q?xSqL3`|%8)?1>T(W~Fu1cR9 zf*tv=N`IF3{v@&G--Y9yJ}MN>rbxJ*FC8#Cj25y7zoe59r6IEy^tCK z74?Eu22-qrvl^6-1W?NbQ>DN}Y=_}U0fPnAAco#a5;h{uHrv>}HnM?3$+?-RX@r)G z5DvPA;M0toum7p1;Kv|o`R^kpU}FK@C#WXjt)%q`cfkmX-B1G~Z3gz-ti`TFf^6HY z+@NqdZ4_z%n$c?45$Xo0AY8v^gP|w)?P*p{{R3Y3MGSX%ju46g>I--^bw%6qLHYZsQt(h8IWAdK$kGwHv$jA=GSb&MVKH`;Kl z{QIMG4@-0uzudLnXH8&lsHwQWVoNc#>8zxcLC4hF+-X}vu)oT8SFbwN8yC7pb~l|5 zo?uy!s~)j-@mn`9-!x_48D&c($`bF<-tUVI3pF*6erIGBc9~eA7s=sjoagsd75TXL zwqAPW$gSk#YS#};n=+Mo@`{!@QjgIR&vgvb!T|OX*PK}`xU#|j*!7q5=7Dxn2$;c``cpeGq z24ozV2yTjin#>KKtAkZrph3tqWT7MY+kh6n5P%@VjtelNC4@T=S`lC>M2rA646zyz z6{s}iXa-t#kc3bH2!He&=TH>s;$ZnR6eN=W;lV#ZWrN|vM6((;#DQDO%65yxNzWau zx1SV(8AvwtE#=HmEVIDP;NYO2kV$#FjPW+|Rh?2+ZZDU4?h3_2&re<+coX#8wAL*S zl9bD?{n3wgBWLLk%UhLBwa!h=`F(c=^Co%jQ;_1jdVNaeYR6p_JEDZwAH({xA8u;j zfMs@WW~F@HI`qRc(zxwy^RDDqo-Nx=9`K!%>s{@i)Jm6G=X~>YSJ3|4BCqCI1=8>IheIXT zv8QJxZ3LsAL>p`mRF(DCEp(dB8NO8?HEvRB$1fwPhs3w zlO|yIzkT*DDd-aKeciK%^NZ@W>*dHdxHrtXD!rrAx0TT9N|!+#dAk zkNqRVH`P=lzfRgq{|3bSP#G_={Y!`AH;*~ro+>ur6mxF3myh#G*Le&@4<9O#X8lSW zTt2z%wQPFIs!8wfK^GH(6j`jtd-7Dxrb;>%C$hWB-+{uY7%~x|{p-CJBn~%g{9yKx z4nn#BqKN*1|F%pFNZgOJv62RNrtD9E7gIv?hVFqO`GO3WYsYXWK-GB&QUfHJ3y)A~ z;&7_9vA`RW00G<@{vk*xdH**Sp^7NWhOPq(zcr{ad^~6-a2YC0Q6b6l60)=~tPB@nY_pbI0(I57D!Ux}6MZk>Z18&ouc&C-?u;B;5Kz!l4`b6> zhza4YW>p5XZv&Th)I8J`5s$c2bhal_Kf7X!1Se?K?ft?PJC3fS+n>Got)TtJ-1v8w zn}UVY6m_R|m)o+X>s~9qYfTT9eXtUH5-CS(UfVUJ65jrWbyu{Gx>HbOyWRA&v7Es< zhn2M=XW3Q9OXbzIP7FQt*%u}C#p!0Rc|yVC#uc)$K7v2Y`Dt%y-~3aoRNm-neTW*i zbAG}@d4H1}&@z2u|I@PmzdDPUqx-WIQu)>2K5}|~uHa;*l}O-sLU3CrlWMxeu{6?L zY)tZSmf?mAZZEmB9{ev#QC(N%@7gXK$5npZzhrBfO}P zo<29CU0vXmhZ3^bMgcOav+x2F+2jB8rf5cxNkG1V#~BL|1{ihRH+`z4u`*u|=jH-X zz_dAQ_@X!BrmsX|iuucSc!UWf@|d zPGqhn4a8bMrU$28Zi>jV5>Y7+RPotWQ8SKhIw~Rh+3-{Dvtff-|E_}KA%)4pdF*G- z2)(nvXGnFNqbfObcvye8bY@L?#Fsi(ehD7Ew%3Ss)nXy%F;O zw!pCiNzwj|89EE$jE5vfMB)JlV4~rdU8;D%X5rZ= zBx-_p>b|D+s@c~Ng4s+6$`U|KM~ppGD9jKe@aeZgF#Tkk=6qw*mlVJrGb#ez<6tO| zmx{4M%%EspCH+;*uzqaXsTqSXHw!)K-?u+Mjd*oB={x0!Vi|hOyxh7f8#sQA6)?Umb1iS2e zE6p7iQBmOJq{@qvas)lKcW8TjedHkA5w8(kCjO$pA?3(>dSzSIz{UrJrxP)AQ|&>w 
zdqxh-NxpD$>aeE1T6^be#FOx#tm6(N6UCk8UF70U)5*}I)6XC8k8}yToP8AY(SlIn67-Ns^dBy^ilF>(;Uf2bY@eE`NcWj ztwex~m{6(I$|Vk2jjEX1Pf2^W^V15Ew18*XspPI z%@yUzv2x6N^{yjri8p2w*~KOO-og>GAU8EE-WS4tr|Y3V9;3pw0xaQ>EZ8T8ujB>n?CT^c>bC1??H zgfSBnz5?#2hc(|D_cal0#jOGR5W{cIK8_EinwbXAs`FL*s1Wa2@i;kc{IkXx!5L zp&(p;{?o((r^<$-es~F&2|tPwD>xTM23A0<<43B4UvOJVMe#UDRU)33LYEnCg|z(SVQak_t F{{Rm*MLYli diff --git a/examples/data_process/data/greenbackground/1.jpg b/examples/data_process/data/greenbackground/1.jpg deleted file mode 100644 index 89e01d47867f6c6188ce12662afa8a2a40712496..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 30505 zcmeFZcT`jFwl*3>rASo}P>NC&1f(e-5C!Q1QbUh|fb`x;LJ?2|Lhlfz_XyIvROy{a z@4c4*2_fA4_POJ`=a2K*~+d0)R$EL0JJn zbPYfQpuPeCuJ8bwH}5^n#l!@J#07+&3IndD0dfFRa`Ic`wqHDz0Z`>py zB_qE@1R(ymIfDGR*?%`l1Gq*+OnmJ+@r@hTuM$nbqSqQe;1n@)2_z%G%fQHVkBgh<(PLgwv1iZ4B_!ou%PS}$7iZeD&tVfoLB%Bt#` z+Pc=Z_Kwc3-`zbUqhsR}s6Ufai%ZKZt842Un_GuR$0z91vvUlVmZ0*r>(_~|ll-MJ z(KS!PhnV*IjfX-vZ@DyGnZlVyPTqnT!(*Y{Wqom zKAi*qA*KJ9&i_m0s}=w)DbWIPBry>?;M#2f@om7>6o7)52yl&-m=*v8?B-{N;oPWO zTOMVG0RaCJ|GEbMS_l7n4E|*b{$&aOWeNXf3IAmY|78jPYnJd^8Ub+sxtdG z7Gh9Ps;-|y+)$I8vI7r#?88DAUn={dVQtZeC9CRO3HlOSn;di_b?)JroISP zMmWau6RzuWotU*v^x{%)WUWV;BIz;r_-7!tkNVU(dT`V9{R^RZxbgAHDbp_4AX+oA ztU$o_@xa~ckG6u%=Ci-vX9efob4Ddd)*J3_O`hUJLh}#KQ^M5AUGwNB@XHN}@uRCU z%_N^9iAhwc?;`kTvg2!4&f=R($VaXK+4RC9EQR?so!9bPSa`Fh3j^iS%%#Gb3iCLf zw>RG!AF`QdlcNEDC{pULzwg)4jNvooYz^0@p}ae8pg}^v*~HLnofR`ai2ZgCc&7Y| z?WHrk#Jk6@`$X#AE{^A}Rc&Hh@Z&DvTu-h2O=}lPjTS%Pegezu-@g36uIRKeqj{yI3qG-lg(e{q& z<5uJv1aKy{{@hfsUdu04X;@M2v87|Xz>rMZ5acE({)tPxN=%$}$5E+~sfK4$H_=J0 zjQUS{$#|Ye+Fw_VtJdPGeHw2Yx>@CEnhIS3$VzkX$5nBRn!}{g)_$(;b5q3Iz3?sD z^}(h@>P5CT$ze&DAOPC|BWbQsQzMg>-X#(7o$NJ76y|La7JRL?ngtN6zCM^BR^$fU@}hmJ9JwY$Xrru2-oe z2pERIP2qbrdXwO@z;?Be(*3kw-D0j9nM>q4wgTcW$xs7LbUn8F%pO|BEe z)cv8lfsOLkZI9vKJG2?IY7#S)KKTqiAHc+RqkAuGo$b`9}i~gZDX0(9O;$( zzH33p%pPFvD?oYV`le}3SbkyQQ#t_2H~M4D&e&5b;Gno*{oExr+h^MXot#&YPwRpP z_UD4_m{QJyZhz-103!Y&{n1&oT-%bLhP02)9sE7l#NtyN!;)m!BV-)$^8aiow&JIx z+D2zmZj5K99z!8C9QK})fCq7hroknA4Cdj%pRA@6y0(q0ukp`!L^hr8@SXU@NY#*o z=UfxdPu*SLX>w+sLFHaHt(*&2G$qLzkbSLXfXPUR_`iH z*E*Gme8^?-_%ZSV!h@M1L`O$24`2S*f~0>Su?TeTC$iljs=fbQoM|%#LUPo-YbuAF zH?C;5iuy*bN;;bMA&o&=^~YKx4`>Yd)85}$bTIlJ%tuyO{|Zn>)2ZPiWXeb3Vrt4; zCM+7`w>y^MEp}E*#W4AP*7Hzz8b|8`*$3BP?}?6!YX?rzhC^52BjS!EJ3mI+m3_x6 z;PK+km$lnxY-+M8SqkxD6%QFNd%C_G7W>G=I*JGwT4_JnlY{w}@@d!2=Zi)+lgf7X z{t)mH|5OtCbzJU+ju^&q^@PV*`AA_I<#D!{GiO|cv@2{=^7UTW` zKq_ebXSNahMe{g#k#~c|_rzY5js(yl8#i(8Zq`4N%Cnc+^ZBE@*Ur8Uq zL{i*c%$(hvD$-SA^o%$!H)Xm#adn6!Gd3}w!A9jPz~-CdvZN_SUiBP9!HE8 z?!q0&-XZox&7*Fd5LzBtYW@5f*7K0rTY3^c=eh;^w!yngr-d}s4;w9#Ugy4iAs92% zq91nLK2cx@qgz?Q)eF9^kZ28i^bK(Be^OAe5WHgbS?s&r-x6%!f$>hoq3c?tQ;H8R zy@3l7NX-mdDZ9|{6X5LoAw2#1y>J%=BbUXcRTQ&Q@>X-7&k1tG#rt^m1XkbAa1<*A zWo_rvZL7WlXmDUuPpq~jC@@D;Y_sb@7-f8DQ0sYiV--^~)|a|R^~jQIWQ{#9kb3h9 zfGXv7sGKmF%_`+!v`e3%agMwKtkd@jC%&UMNIIaShy|Mk*g-~bs@Wf9u}&5ZTQ12* zsN^o)To>0x^GpSoB|Hj7ltZa8t63aHAg)5$zf>-;HiHoQ3Z1 z^@WDS`IDKwptv00HQ{nKB$lJkoc2%4En5?yY37!uv#B04hUK{q!1#BhHp|% z|J?yaiK{Dpwwh=c?YEh^Z0beU9jB~qJ=<YItId4Z0>Y&1xU$u+(@5afV+m|`@Z?G)mWen zJEyxGKR0e0tMo?S|HAD=_v25Pq&%x@C1g;~VXfLv>^<)cB~5i9BO@Odi0xE3INgMw-(p0!U=W*Omqa9TCsB&&+YBY%5YF13 zpmN{`Q$+ImVR5$chcmHTccTYH!p9bv%^NvM-?_^$Q;RK_bxbnYy<7RDJqCYsf;0-Q zuKo2JK;6~ogyoOjJdIIq$*{nP;D}jZ)axcPDIs$-dC2DD!VUkI0^hURe0qh;0vBag zx>4|b>Ls?4kCTlft=&#v*B*(x0XZ|y7YfiUylN}HW4{8nI~2dmHD%bEsT4(Iu?VT> zM0>Eb?Ui8~;mGB=m}b`e>^vC@tA4)H0c^&ccj&1&imOs5?|2Fv*(`EMp4O9%iO=a5 zu$Y|Jo?oI6{tk~z7|$$(O;4*Hf-u2!nx>>|;zaUvZvp?HxS?BLQkLuh^_`{NHAQJy=*M1_~A)rw3Sr_8&O9 
z0WbJmF?Td>b1>gzI+0@geXO4%4%WA+cQ7TG9!=Vzjskfdc2sd2H zaN2qj*vryvz4zm6S(Nvy@U!0>2iJ!kbEY>AMN!w4`6%8w0cXLE+H;Mp?ma;>%n(m! z8cG%q)?YI}@Q_qttrI9!)FhSbHBE zASxEhggW0Bpu)X%M$e2IQ#@?8hYq|fi}M1~?&;;}W?vk0@k5w?&2;b&AFp$90G(g9 ziSLvlKd}5c)L$H7=%G$>;~?0iZN2#Zc5GY@zm{^nXwt>JdQa211CYu%c$V+FjK2di zN1hX+%@r|<<|&W$dhNl%B(39)@rBryXxtH@VwZ*VQm;lESD(Im>&o%L_Z`TE>(HG46u7GdV)|vlX+BRP)-kWbYI(FWPb;DR^riG? zGz;ZhjkutF@S}52s>P2Jg*mu>d1S5N7&dl=tMweJs?ewlT(c2)egz;O7fe1S8KV6i zlmFpC(1t6nP~}KkP%ES3nAP;z#qgc3#SPoBlG4ojaAhWJ&{-Uo792I6BPqtCZZq;J z4cvt8A5>THVlR_% zc!nc>O?mpm1;3X!=rmsf{@H?QDtu{b#NZa@-D6DM?jSSO?&a{dDU++y*%Nx2V%zlS zNau{=uJiP|pD>jZYI+-1W&MC&+BnnRF(!Ra>8z6#RMNXh7nleb(sok&v^>4C;Q5)N z{P_%8cnjJ^A`6^R!}iX>lCc_;nk&E5k%pCQcw$P1W!K4`jSGsK_gEk)v1mjiHsfoy zjbZF=;CfOL>3H|Gw;H#qQ8UW{{c=kzL)7K$-XBXDsrRovWs|0mO$81(qJVAvxRhC?triGB zDO%QR3S@~MWJkUw!Z*SUGX(&?;@atbKu&kwHLD2lsNrg5i{YWZNf3dsHA%?vp^1aN+poHj!A1S6$$+}CV%y98l}_NwGSUW zIOrbt1Z~}r(LSO87>R?;6u~_yH-|{Rb@;((f4MBKPaMO0x|0uCbbaXRavnEld_A&i zJAA_+a2MjJ1>|Q+q|;N^_mS3$S3E8U$KEBRVsPT6|MOJradFr(#rM#fe#tQFioJA% zGv%}Ni9lXP7Vj`CjZ>quFPP_jz?<~68Eb6lsx^5$js&)T-yU|c4*95QqIv3G5v!9o z0yYCLsV+io)i+ipvv#(vxeGM1o7$T@97A!cgfZSWPe&(b<}iJCx%eoErvH14ZegIU zm%~l22+l6Gw&)XS#RZ%1RT+ZkXEk=z7%LeIjcS8TF` z(t4soSMuwW_U`HTb&}ChtFe(BiIK!7OG{S(v01S!jV#0a+Q^TE?hF-qQ>Sr)6KQoo z!CN&%DwSbo3pp8j>I!)tmF8-SGbO6BL zA#}7CPD)YBH*OStAUtM(cejX^Y)0DVrdo7-XNOXZ0do|YR#+zo+Sf7*K=yofO_uU;PAdPL_E;xJ%xgeE5O=9Pem^76(euX8<#eE; zuGCW;74`B+d8O(+my+K5NUAw9$Gtrkp?Uf)BV!=8^MfW^pSn{E^&UcdBtEyUb_B^8 zBTToN={giLu!v@H-3O8?| zTum#p=cO+*mDz)#=NEaG-*;eg(Nj&nE1)tjg_Hg~Wrr3^vR?`;9yKe^%P?UVVVIe1 zVikQWQ<`uq_5Pad+RWMX-Dx-9KS{hvK0`2@G!$NC@XHcKT~KR-yrff}q?)UXykZLF ze4=_5eGQas}u%m?bz6%u}|ozkAvKrz<;z`khp0d)f2T)SuLeJ)e-x zzk#QAsK-BfoN;`p%55@%iD$3pe>#a@{E@;NlyYvIxjHioKffr{I6!r`jSH0@QS<}WUctK zx)NW9QNvm0gqgIAz;Fh!W^M%g^Mu~YpAof8gX^F>a&_d4{$aDiZHN}jMkndAv;xKj z?Ni5AEXuf!=!I%emh%o7!Hxe3ZPQzMd?81>T>(uS>Uw7s(naF@j=k#6L(8@usElUk zE0SaSxy@?*NRl0M@L=;%slT;euZ36?SNr$!c8a|n*tMH{{$-Y7w{#qY+zwQCZaM)a zJB^HM1&21n#=mQN+MSYqP=5O~ufd$D)JU<$b&aol)yk{vgoo`j{U7_^9fqDD_WR7c z27e~XD;ibW&CRgax-eQp>g%qX!EB*dfYLDBg-qfzY2@lBhHY$Ga&hh9dFYg1^EtT^ zF7IwkpLNJ%_$UqUi8MYyK4BXE}Oq7X2i3KQ;8bVNYU*k=$mM_RT?i6`r;m z=OK<4n~DkZVR;i7i3bd{-F0(vEy^K%h*P6D*!it{L%CAcym9Uz?Q#8^BnM(Mo3eE- z-%2=RIvCh`HqlUQ^*69Ore0jX&BPvH(zJzH<1I3Hx+sB4!$~6_E z)_HCd5i%PP`ovS7&{TstL97M0W#|U|XE(x%ddfQ12H|hUL5&*B@xF4&-?3@bLcj&( zfNiA{25u3I^#n$N9^#__!#rt7f-Ae2i=J7dHclcW2M4oxWv&1eb(1M4I&b^wTglj? zx#WhUgQ)v-D%ay5_1t!jagOn@-WWZW8d+tMRTusmp1CusXMOsS0MJD}_>bWbUI{9i zh;xNzt>U7V5K&H{L6|^bvTh&ihUrDBKIsmbfGzb#Rsj<{fei4g31S z0DF2ZuLJq1eqw+GHJ!S76?xu1OWvlbOCrhd zr-TfAb1Pz=wX3Fdy?xS)KJ=ySKtAMp=gpidGP{YrMA{goc#!vPbH)Mh!J|@}+0SBA zULF=-b%GgjF*O`hZ&;cG1}Gm=P=y?;!S7cU?)MU5nSET1zfSh9Et?ZgNHI#dqHj%A$NZ_5)VA$TuPFadJ-gC32j;-~ zAba4Odz&X4>|v@|ckha@{vM*Zam;yr8H31qFXrQFvIxB7ir3B8F5nrt4GR+MXg#-? 
zbApAx5fZbP&GJ=9OU1mNB_ygx*F_Fi{=roIKm3`lK;?}e)p7ge1F}d{Cut(q%8zN( ztES4uTScBu9(?xjM@{EE1Y3Pg3e_B~aK3V(oqDm;tUb2aV+bm8C=0~cTmeKOf>esB z!e8-*?re*WW9V~3c>R}Rl>Vs36@WW&Ln~${JNU2xu{0>{E&U>V;$Ymo{^J-+z)KzV z(3#`0lqDP?w4vTMta^rUG^^#6hTtnt0gxg1>uW-S{b#TFDqwC2+vm$AlA<4&eoe;) z>~Gofsi*afymC@8{uOMwe7jR-nMRQV#Wqp*aoL<{w%U0wc?w4SJNFtB%a8b40y^kP zLu6g>yV=VI_6RG)40Ye6xgC1*hZsag-0byN=jT4399lBMUD^e!d%Grb0z2-eht`^PsMUn=$AYqm6*4-ql-eFC$_&nGfXRy;oQ7jVYh z+j*G&A?%6p;$rwUb#V@!Ae40B!?E>tol{1iN^j%ZcPA>ljrMu#D{i35EJ5v{r|rHJ zT*{N(t(5s4CiNWx>4MGC-Z$y2klJXe(frJ2ek9P=Fq;5$xg^d_emO7k$(X1#L<~yw z?#EjG`@88nvRzd+m-@7HFYkJ(P}+w`4e|Ybqu-02@+3K*d$-@`Ya;o?$CWpIJ1X6B zj)=!QEGiG;9-$C?5jq)(4fjDRAfpNyLINU6{pdfoWB)kbOHoi!Z_V7xdSL*^Rr+J> zVDsRbE5L!6m{H&O6Fkm3UjZ2TM9zt%;f)^wfb}oGr zcNYESK|GiCEGaN)3wZBv&{Mc4FQ6iGRV#H)@8{QDJ~s%{5}RDghjHNxouGI_LGP)V zm|Z@w;xO*3sHVKvOTUAE10+BxlLEuTGvA2w#aFHX=;n0&{PE8mdw}*>M1ylDo!hSI zfEtxwEBwqm=S&`-=SeMlURaJdG=xo$fLDBL>4DbCZ}Iz5_>Fof&-XSOtxjFU5uvzY zBs-GbB7tiW^pq8SC^jReJ zw}J$>x>xLnlW}O{Q=r_dC5swVSkc$77xa*E-;1hM+%hZ)p3&pH&jh-ckyfc4*tmPk z1@GM4*P4FXmo0dBjqh8^OKr+7dW5KP(5BXG44rR)=>q&aaq`3pxNJRtGwfK%WG^S# z>|;H;lwB$8EtZ(^c8k+e6+b-M+u`z4{=B-p>z2}rUg(sLf5}m`d61IaW4?OU{u`Uk zy2<_^Or{gC@xB#WkFCU@^tsZQ-_UP!tv|idQIo{?4~N#nQD#&xWhyZfo+Qi=MaQtG zF@#Ll9+Ne;Bg#9Hyr|wfFJ#w!}kP zs13{nx9C&X;7e@N3djhOx9Ql2TXsb=UobGskh|^gPn@yWP45C#t&f3%f6)#!|18w& ztrs2D;@62S)YaO0G)*^0!z0#{#0vkbeHT}A-FASt7T`W-YG$yirYk)U_kR=3%a4Tq5NpdXkp%$`JD0~SVCTqHgI=P$$Fygk#S6=fLL(%@k>XP8 z&0-5nF2>yKdyj`>x2Z$dDVw%gH;(yhVcYb<1JoxUu_C1=(4iVx<~tcl zPD0y3h>_BC!+y=QdO(&`ffh8m(>8*pi?lVLZSZ(o*U?@#^O!D;%@4Ic`rA1=K5J*K zIW=VhtpToE-ym7j6?bG~%I})g`FdgFSKw1%2zIgB?EHj(`tnc6eC?XT4sa3-^H~$D z?A?`PYY=MJr_5KOYFKARv`;Wq$2xa9hjhsedeTKR08fFtobi%WdJQ^?IGR>ScI&+4M_;i;>Y_34$<0BETs*Tbe0e6kd zz`1*5gOxs|NY~Sw+LXm-Y^}XCEVY9?z#QikQK!kRVJSOC;pYpl@R{}NuxAR+qu=nk zHHRnWU^1<=97gUY2|uBqjdNW$y>+!)YS0B-H_m49q({fhQbPJFC+p(cYCMC$kh|8i z8DN?TogcS@(M69sd<4DmgqjtDUyP%I(=uOKN!FHf@xuswKy}*zAs>b7_0u07wP?a| zUGLcjVA%#M2zsfyaM+r8AT$!Wsh06L`Z9~>l6e0Lu>ZOF94&YnbOjiUxB^^WXl(M^ zYMR2w7Ow!Wyudhe7_RsjuDtM_@HL45sXLBc0ag`LEC_{+K2u=oi+37F=8C6baEwP2 z!H8J{b8IwC=CvJd&OuiIXD!@_xQ5)8MlJ{G6<|*kv=+UdY_18%dzr#Zisy%|=ck@h z6}SL*rDC8@lW?hDuEM9i!bKLGd*;lj+`J>(J|r_zX}Mp(B~EYL`3j7 zGR{U%5pR@_4FO-0$y-~7?P3!+vgw-J#8r=C#UoLH?I(eW=T_=q?3?6vO@>n|yeR=( zY(n8|94lVJRx7|~Q-nW!^HTby=Pe1%b+G}4W^9oRH5NK~Vg-O5NhJtte zZ2*j6%$?{*CXsPSvrrF*o{yGGE3Ru|gkXcBgtLNhkEK4t)>B~f?5W`io5)zcDIL5w zWQW;sR_usxN&}N~R&K*9$P3&HP+dhfufaNh3T9|{!c5-uuE%b>s60+pEWq`@U|sv> zdGeqLdrO{MdW+%C2X9L$&l&2?Af=k4`8oADBRpp!tI7fm>^2gPJ};e7L&HJsf(%@$%%kXRIO;vKk)*-aRnQ+ z{X|rjP+4{NFGCFGg!M?mHhOS$?tulYXPBwxD3fVcm6FKx1>HP6-F{D~K)o;T%&7%;gLD z8DfC=@UicY1aaw%JC+Jv%&#Yg30ab3rHMy@aDsqk$$-BdaJu#JT5ZWV`#k5;7&IP7?#f}WH)QDlUm8Ns?yh^81 z^3tE@_LQ~yR29QW)4l+|UvLFY?6aG|djo+C!Rit_iuvCRo?QWEgM#o*w&db(WbPa4 zd{>)}uc)-`M`O|zsn~7=w{9zhKdyJFLV8q>R~P(oOYtai7Q6i9^7vi(E@z;sck^SC zG63lNeeoTNPWD$dtoqs+lwbY1S6F=6enXl2Tok_mVjtjM6_vC z>Vt>tfqk*WMfggDr{r^@0Gus@miMH;UBJNbAAg^5(u&FXgcpi$T?fUmKiJBNa_w;w z%Q3dMK;^1loQQV~tHUQjbxr5XLSPQOgRAG-k|g@nsuuNGTC752q%8 zfBiJ&x3)VYu?Ywmj|8x3n`UKB$y?uwev}l#q=3$T2I`C2Q{IU4f zW~KH@DPm#&NV@m&$MMnO(#;HN+C>ZS4IUPg$Gpv4?t-;1aB!$zigQ9iktjB=c3AU` zT=YCgPzI^Y2^QxyPpdXuqLr$*k``ajw|UMM%)%I^9_&+*vg(wU)sr)feMzGRM*?;$}$L*ejO_Z>TP&W##%*jmf}0f43XV_NF5XQ7VX@}A9i2V zCBwE)4-M{;%2JKj-OZP~*2EW3$7YUNf_`xg1|A3iPaaVEXe97O9Ev6TdhIcU2BX-% zuv|tyM#};u&%1SXJM=vsI*|?@73c5YUi57L{bwP{h1i^Dq_CI$Id<}|q!jq@IZ( z*6{Xt9{Je1dS(N6n#J5;HNtFq^Weuq9sWhLLcg9p_XCDrvurEQi0Pj@&z4H%+#5bc z-7p_zBcQ41OKF=+KNhJg00XP3*Jy#&zID$^y>(`_8=Q~jQv%h`T(p|0VBPVth62=G 
z^^5b5mDA`vP41b-IRj;*#kyf5-yGMTzJuGnWdgB&!?r1DgzTBRIb|}bD5n(at;`zs z4Vvd}9N9YY^kw)npM6X-|16L~cZ+YRW8JG*b~QL(@gm|Fcmadeb@_WPi{LCDyR`oC zvd+H0?txmK-zz(|dLbBkg1PTxz(I%|&T&a%7QKu5*&Ga%k)K50r9oaX8cf!<6He`Gc1($Sx-TDCUt_$|4Zpk+E-q@M&uKuP!{uWU0+gq( zm8aiWc1D*CP$79&)VSzIP@Y9T8P1!_$M-Pd-+5=gMvKwkus{;V-uZKj;%|WXuJ&D?dYhz5NkbR5l-gFo85q10fY?o>Rc{UxEhqK(ggQ8X$$v&{wy_v z(8@A--v8w;KbH1lf+9nV${y<;OAuS@iXJ=o2nhsq1z+Nz6Qpu1F?)ILON_BQ9iZLiZa+(5j8_XV!nOK#@+$u* z@LMTd`;+WVA9G^X%3h+0DB({gA?6IXT1vw@&noIqPkJsFL=c^|7E4vTq|>oVH%W51 zb1Y7s{JK2D%{|(BPN^f4Hpogyvu}5nii9w&=NG?t@%7^pNM6^8tmUO^h2QV4n)lmN z)$v(Q$h%SnH#ZJ9XQ72&>`gqMpQcrEe9^1eB@<}qegRC;!?0F)tLQ$}vISd-53Ux% z+y@715^r>CiPuag

zfuPaYt#gQ#%HIZM75+&rplLI2ql#b^3uD{sI4YowU|8~9q zmnVog<=o2Pr(K(aC#A$4*EO7-AI2f0%DN&4b`uCIfoK8cS6+O7%^_O;N$3F>-{|G= z-4A8a&BM>}dwJ#n_yB;cUp${C(>yD@0+43qu}Az6I!>Ekb{x9o2wt{hv4)9Roh$Gt zQk5E431SL$U=hu7w{HBQT0v>PXV|BejP91EzR24&9&uVMi^^s@V2kifd3~eVYu0!V z7~#9DI2E)hrU)Qy1->w!N#>QGREk) zZYIBAcfwav`fR_rwg#X%uKtf&O#Xp`url-U3@FmjIPQI{X{fpD+9Rf4EYvxEd$17Qr-g;^Miu4i zOH>xl4nnbbuVR&quT!}L$?b=vUA!!%)YWXr?|y-Pn*d++71`v~j?h<08($W$sazS4 z%m|BM^E%-OA8Zf1{EDNS>jY&N?)FLzOE1b*`!iTFPY z4f;PQr+UcLf_?zCOo&Rav9lr(N24$K{G`0MhvCOMffp-btz$Z6bT-miTDPHJ(iuPc zzL95yi75#-DHU_cE8daKt_oIC#y;lz%0IRLP{9|f`?_g4_P*c<@>3Ys^+!zWPaw9u z_>*f*+IOmvQui+H6n7sO9E~{0=?GpoKQDUdeodo~9AQ$+cD@ITS2+vojgs?OSizfE z`-iytoSOtFmfA)ieZI8?EAcFUBI&RDo4*0dD?#wrlH&X zRjZ4FZ2Ghl3y1~Q7}>ZTCg~U8vcglXjn&LGR&?|V{(7AC12Wq%Io+b8dvUY8v3dN= zoJ)B1uIh~8Q!kW2yg8E}ir@n{4#_K46|y&b70JY#W6SmrDXA@zR3N;aA{dgBCG)^Gy?=|(G+J#q_ogm*F>P~f8CiFnP z5w5=M89?F8hU9)|d~8=DkzBv><&l*oxm1!N?XEf-Rt0GAd$~8^J#K9EtUtYm)aBie zGLuK6IJ0CA>NzQ=peQ3(AFJwB)3WzB#|)1nZc`pRO4or(4S7`djHtiQ+-$cE(CinC zYjElF_=W~9NFr>?qQaCHi1a7ccLf|Gom_rIwgUGd#%CuTRuY9yadX4!8pC`U+@1!* zzm$2bH1qv3kOvCx+y$+!2}56FPgK@}+8IOT1k<#C7CjopNL!rq`So~Cxu`#XcN2fW z*p24EqUdGPIn{m-<~906=%&naGd_R-^H0Ra6V0#nY6)$$)g4LeuOVzvN*F5u|PAC94-2S)@(LW%eYJ& z0$q@-92Vc9!&Rr}Z^Qm(w=_$&kti_MICsWf^n1_KcRt#(cbImgRHGQ7G6XEB#L)fM z<~{#tojd&=(a#;a;t7P}0rJz*J?5k&T~(E`pH~1n*n&Qm`Ivh5hnB66nvUsLWhnIl zP<+W4__IT{+)y{9c_{=D56>C=fuARm;M&);$viPsJ@CZbCGh*WKh(Yc|Lu)9|9M0B zbVKj}3FrD$nBzgBbQdwfw;@e15HZmG(4cY2uqyyROm6)5Bi}bhgkt&o1Y{~F6DQzf zU?FD25)uSb&vs}?X#T!^Fvm7!ZUU-g{0p5^odpN?P1jedbv-PbX9lIuZ;?HzU5}K} z>pCed(y0%ldqH>#q`c+IUXZMUzYzmfX`KZWEu&y9KS2|guw_a#F)*q4!~4r&Lrvi` zLs`cALiEPns|9}HX;Dg>e~wLZ;-_X>iGvyy?i0gHTs@+0KA>36yOm&fN{6=n%G6qj z!?{VhZcnpB2pcKqxO-M3zTHaZjzOa$4RG%0ik>=}Gcv|0Bj0Y=&Y%Ziy6BYn?+ z#(LqJ%6#7C_Td@dHP|j)_d#zyP*B;l0MF~Y8s@|f~AAdsyP39L}2y92QF!9kpq1i%GQ&WX0(I)M#ILYYz!L}L? zcg$?PVDhv-Tt2KpZ8Cm? 
zad64ixX8t}=#Z;bqd@KFURMdVm9Q8i>B_UfRfZb@0GKL!9(Dh7dEDda=Hv zw4j`*RZ~@^ObmaA^#E!dlvYLgD^Nl#DaIV!{0Q29il}iZlEBukC9g0+YQr3!Mu{Gv>--u#Yv~QJyi=jjP%jX6dg2BP z4kQ!+q!6$E_xBtAznM8}%#Y@|?VT5hO`Ux!+K0OVpu}J+^a6zBJY@-86qT=FyITwggk2|=U}nhWOaIJrHXN^zVpJ;oVepA!nvTXhQ0inKM9_@MR~b4H4P-Z_n3 zXn%#4pCOh8c;41sylzMTDzup!>tB` z8+`X}2tSvD8=@)i^}NuBqS#lWuB-UQjuHy^e91}EmPvbDMuh%y!#~)wmsM^T@N&?;k^>TFu z-i5OzHY`tP=d>7t+8Rijv`wqt59*<5J18!co04^c*$Bhuia%QEu}-8B8t|kn*c$c6 z&=33R&Uo7;A>DvzXv7%wW->UkVrj}Z&+15;q9?Je69rfI030Wf4c&J8g_wmXvcHf?(ETr$9dCZSUn?WqbdG^ z43nN!CLhqefPXbm>79>EDw+T8w7>1QnzUE9o?@xBy1o@AbX3_pJ;z3(pS<9TN8N9T zkvyTAynKmw*Qi}tL$f@&;mKe*0>|IrL@eBA6>!(nDc_O%`qNtB3a}KTaZqbEe(!}> zeAB@zM2P_*Aew53+t!Vl(oQvveAA9AZ-`eJx2%d+?)H&*_U~eIdWEH7OTFn}Gs$J# z5>nGNg!sM`rG=&~L-sOmm{8$gCv`m1TXx)gNL~!tz==Klx4~nn(E&%%57iOtF>q_qr!KwRBJU7o({iojeK)Cff7{-Z&g^@Bsw8o&Zh4Rt^3~-ziDCcW)vv{Jmz^} z{$@2&Vs?o9GEi7MEM{CZ1u{C$#virJy!b?Fc7{O=c;F7^!60f{H48`w1h$lwf!w8V z_+qhZ0ofw=v>QsA?+U=9kIN_;TYjVe;7)gGmRjZOcRdRk=w1iE73cjK6xcqefL1A1 z0)h-S3}E{}@@;bOH|1}!QiPlD2dE-_KyK^iSrXn)59=bbrH{{T0)1XeCl}r- z6g%s}6Xns0p$%<|3QLZ{;qax;JfUrag@wyUaj*r3_T48eT;mgZ7zGfUJRPVd0Q38Q zEF*q6Aim|rrmdV4DPUtb{x{8iv-R-U^Hb3J#2=xu=S0A2G?ug=ATwH=87?}-SOgP z`C}QdMEj*VlPIEI^~`qMPh%ekPhNTgD*NTjdAW78N(h?u$}buEFcio1=rp$x&j`WU zuGK?eS}7sp^z!$5Ku3&?DN?EZxJGue8x54Nftq+RgBNkN942oQq#;3n3QDth_V31ZM+2TWE z-RTXT@~hHx68nu0%I}R71vw?+>QV_|{VfnU73h$qtCypmIprLVx@;)*TcVaWwlp4&2So%?5JWJdAPOR)A|ix{3J54Ubfg7r2$4=`LLxSb2vUO(hzJNsiAs|i zdgvuIsiB5WAS954B+TcXA9Lo+oafHHcV_^<(vYC7tdlbrt5t4OiK$wzn^Lcv9n#j1s)Uu_%#VpL$bZ*lnN# zc^kk^j2g`Y|BAYhAMJVRclFb|OHw}26rd#YH{ACYh7&qEU7Vy&8IF z>w1jE_#59`MIF__kFkzfOVV(s5+0$d4TAh3DM1t=qETIEmQ^f@c`B5?CZd`DDiiuS z+!N@()X$Y$ki4jUgYRmUvnjks>n`%iCUqIo@9ur(N#7wfCNr1annAA8y%C{Odj#7?HgaD@G%%87j0YaPELAUDmbO z0Y#u|+};+univuS-STEy+mZ)iH&^b@GNSR>3Qv}<@9=ozP39PHjv5!pPMX+tq7|GF z$+oZ943$7aM%ARu)537fxf9(oNgIFud`mr6L-BZ7lng% zKdP#XzB4`9M!X2zn*I1r0!*lmkyOaF@&QyE8Te+FAjs_4Lxb3l$ykOzg8jtTy;1YY z!@D~?vZwHCXOyUpF`X_q>mQ%Zl?(i(g+|BSEG#Na;T>*iy6TwqRVudyOLktqw9o0+ z+WuBn!L6<(Sm4X3o@=!4_6g!6SDr*51I=IQzTWd$uccPA2DIp`&*+qiA=eYWVX8OL z^PV2N%`|L|<7dZIP*X6s@24?3jBNAhh~S4NGMZUEr?55M7RUxZjbY} zO<)nxG>keFs~mxk!OU(I!1|Ua5!`;;%?$jJ^oRk?Ta?`^_9Qy`9#wTd8~p?wyo4 zkcJBIG%0=D#9|K--Ztv;z0S3RJ$zizvLL#JRc-i@sPjq#GJ=U4UJ)n3lG!1 z!WAZyJzc7-UGU~7HwH$N-OR^Gxd;A(-+%eb`2dMDF_z?rXJDC{@ze9msI+|k^uP}_ z*5wE4HwaW*db7v51ce`*q*l@<&BTfOn0nsCd)-u~)J&RD0Wzxpo~3(a8CGv=O*FdG z$bGzi{-XDKqJMtBCGvs+RYgua36lJZph7Ok#bq}Q{E$Qhy#V2?=3nQzOOI4Y9tJDu z>ga=LALY7k07^RyOb^SS<&*(-Lp6K6*PTig82#q!XV$$Qeq{SrdYM4 zdy=xM%!Vuv`ajHGh$Xm2XFq?;s1>)6xi4MrIP`U(vb{AN_dH*WDCj|rGaF<|T`Bj< zeGQ}=%YgD`ScPtE^6!x+cZI%z2}s>y>hmvfu@`fX>SsPt4Dep*y6PF>_OSv+RLus$ ztT>k|B}s*b9X^lQ_Mb4;Z_J&FddF0{0fWAF>z@b;3=Z-kk@HOylp6tGto^&Q>J8}E zbAI>svZ2tnaGK{RWWXcNsS4Mjv_`PlT7IgbZb;8HrM3+>#MjBBKh5Cr6&$`}4jzd4@N{#4t$J*dYnyJWMn6QyGq*HCbaq{4q9Uiz7sM%ul7sw6X`*(=g0DT)B)7QUxbMIB~RizN#r9BwE z{`_wpF`$`7zbNmCR2-V%F$~!3Kpd8EFJ_KXV#TLdKLxzve!^s=9iA;Racfb$yYYTg zx%)#lVJhF7^jHB#86nQ->kE6vehc?RhS_438Y~Ax>g9zQxnrus{DaIkHv{y_Bvtwr zqggU~tODE65BSWN`9=e|#_8Lhan)O1CpTLArJRf9A*E_S7nWaqmH)d&83$+_$kh#! z_$cVY6|G5KXl8&shqlAxXA$X_YH}(~Zd=1*Xl*&D{boh0@OCUJr>B02li*ebXx=g! 
zw#P+7%EGZJS2!*BzD2;^33s>hc6exsiIzsLERG#&|!;Q2suYBi0S}HJdWG{6Rz!F=nV0PUo*U^GoEQW;}|`PVl5Q$ z7RPN*aD+rxW@6n5y|$;a#DEJfWu}Mhf;dLhw-w()o+IT@W<(_~$}2GY0MEI07+9k} z6Y*2tV^C}v)G@>rm_}G%s{bM_*jvh*8-SQzvO_?GUcLJ=oH1j+3h{qR0}BOgN*SJL z9!=$hFf1bMavRkh2lJ7iFK!y8q9-d{N>Y7f&3%S21q9v#^>u&@ZT;@X@2nZ<_F#?- z(tw1RvEgO7=Tm!0qJ~$W7HvD1C6CcZZn&OEcaDQA6^rpLNm)$n@bGRR<*=78V9r&w z8hpxqUz)o#o{zfb=w`nxideFShGx3Um=8`bx zX~4u*fjk^_CaUeZ;GQvz1m`)X-%yNhml|1RhDA$GGHE$6|zuF&8`&rwi&pTq+%sOMJ~ z?Oo3sNo(1znZCB)_tC@w*8g?b7wczUFj!ygiq6yXGBaOWWadgQ6&n)~WzW^L zI_WK}NO9zSdM<)_S>8DHL{UcGnBJIAVEaP*!nTHwv~O|@4PC{;hEL6&nS!_K6A{jr zD>NS=$bFmyhp?!CNCTqX)*^7Q`+xAi_K)X+G%syPS$x&t@SUn8HI}1O)Jje2j=_l< zwZ9(f9NldrL`G%$?Wd8+Q+h({$|_qJ2o$}~7}aIO>Cr>_f+as=3u_z({Wee&cM54p z@yxcDxhBlTky#Nmd1!H_h?8I&0_za_Nw1ge@SKQ=Na_#vF#8aNK$uIw+uNdR=fS9& zfF$}}%(US_OiH@Q4Cipx7f0ia$G!1-6*|J6=#LhvmkfLov=oAVFj=K;ZMa$6VvEvX zmnmVKYxUefo<)sgf6yn@XjI#G9f&FgNXjS%6jWKoKdhXY-KwhAC0FWN4^J}>_oayk zl7RA&OMaqD2EtaEcioIVW0_UeT7PNpys?z5?Yc^n1{n2VGG}qKVS8yMmdR@;numX% zeH%6M^JZ|DAS~8T2Zes8U_U}JN5w&jSMGBiVT0N{uq-t#V35)P+K zh(~O%GY7@eb+?NPR>Y3#_TxWri)o@+E&3Vwgo7y0CsXZn%WGEv9kL;yv#lxOK+|y$ zM;*WkCLBVK9qtquMs}S8T$c~vx`I_eeSn@7*LIEl>|cQs|N3X6Uu%gJnd#Vki>SE7 zo%7U>D3d*cQh=`d%HTXTnrrDtaEb#@HWGNF?ecWY(7n+#H*Pf?T~R9Z4!$YFjDdf~ zkGs#(!39D>=V(*dJa1I=_D8gaDcTfE%9(o@A5_A-AHwn#;oM)vH&a)DM8)d8ctig4 zkSU1#R_%$_T=}3O(_8o`Bjygz)inc#W|#Z5iDU3Swu=8|ih7%zqAQ6pjlvQu*;KUV zZ^lc8M(AlkFL|-I$z=1p^BULLt2Dm=f2e&*-mX_EuAGRGqav5sUDhCj5&Lx!K&lnD zvKT)&g}7v-jr^T&`uLx$OtfjS^(Ilc$p_nkV)YMMsO0JRqc%-J?SnlJ1b+-v5V7R< z2|6oLAsBh38dju)PrC#~ABDLaC8Ykyls##Lnkm)bI-)2N_3uhn_40GdliK1e=3hXJ zd3)H(US#Xao17*=&9}9jYX!v0()xodL*Z}oqh z66*`*qWFK-?V4u_kuhQIk41x_gfRo1a;!rC;7I%1V`W>O8 z;BPkJ&PGN*^L@D-Vd|)E=%%Hto|~03WOF%ajxhvMY4D43i3BDo|7{Y*fCf7boH}#Y zn9u%j>ip-fqWec~ly2Sk;waQ)X}a`2E#o3qeOWIrrI3;tsR0@-zIlKi6@w*7Uor! zT;6#Ii1}`a^Y4Tq(2JdhKqE^u6(~4p zeHS(6j|@p^Q~{4FYOZ>1=b_1sz)Gf`G0y+xO8(ylB>bfDyfny!$6d7YWUgsMgqf2? 
zzyrFS&?ISHlqINTf&FcAhevZ!FA!UazwB?LZr%?!=046*vaEpAVn4a}+Xe0mVQ=Ti zQsXo5M^jbz4b0-~qBd&p1k?f%4HYkJy`pfO-l+7-m7St>Uv4(#whJAskT&pPBR{#% z%jR%Gqmn(iA&0%RGX4Si0=_`fN?pBb+%twBcbn5jY8h+9%YkO&(4)(45$25+FxqAo zH?FJxvu+H)#(s-K&fQO!DD&`pHCznkoOuN_LV4!#2hp$D(eIT%hESCee0`pfe7o#X z53C`oYtpX7=$uVmNHU!AMpsZG>uZBi4!igSS8ws@-Lqw0XH_v6B%{dxhgEgGuuc}$ z^kubPZmO1Nh2@L8u2%{MZ3?NRb|WfYg>W7>po9EzVl|v=OXK4vDJ^=~HRHW4!tP#f zd#M>AGy@+w6wlt_d5?>mQrX6+uc#?iL)P|9iE=x%cX)b9oMNn4qr%n|N1%z{scHs} zdTHtvx8nvEs4-cI2of=Z@$=37ftUV!CJ!{t7B!~M+#R_C@xCs`Ay1&JN^LB{3Z6x( z+aFaMpJRAltD#R-J^6$!B6M??yil8qZX003V;PxN#dqBHWEYHY3{t^)3FzYXFH6h^ zWZ)|LNOSiGCj;ay!~1P1hWrYp)pWLSL+v3u8?R17J)CJ_iGig~|6DK~{K~Uj^nE8a zi(0VlDlW7d!Ifi8GY3b?PU{#y)xOL*s8Knbo{1iW`@})kPQ|j%Az!F{_J{-E51{#2 zUf(D$NU#od8frO>nlRSmiiui0CLl9dNeB@OGW~OTzoQXFO%0NK0iy7qaz_XLmd5yt zmjk<^^VIHKo_;Lzbze=F7SQ_;Cs70x4aETkHdVR}=z}-l5;Bp=!DmG#E>mE2mD{)d zwOd~)Mo^WbVcD~9(7Rv5Xsb$;zU6vC%-hqOFU7`0koY)Vv|=5O<%1m`Q9s{px<J>#1K%UH^XBi7e zv#YCe2U_Kyo(&RXchv6ijI@$cqAncro;#91Eh|g)1-1?DYr8uoBLe^j0IS_eh&FV^ zOsZ7MWK8V}O>58(9gL)>2?4s~7Ir`nAQo2vZGa;&I~ zF04?Df_4w0RC-;NGY%TOGo4W71qbX3jiiI*CH+9(_;AfQvew-ty85 z=+j*2HVg9ID4Ty+BpXs^oF(V$>c-w@9aw;juQv@!3eD0~oF?}$zOW=ds z@!Weo+M?8JftG(wJ6(#dY2F>#e=Mup%w=+ z^kmSGOI+HHMsj@6Ulk(hX+Yiiu}p-AXGJ=6?$!DmUDehG{nhQvEtx&dJ7qUM}OEy zv}?fS0osKhymIS=Mt~|UMRu7b2YZs*DZRrJ9S>at`fn4Vx?x|!MV;;e3K6BC7RtKzV|GDN z>ANC;U`_;y|3jPR|86qxp5_O1W(XZLP#M*5UgZ1!CGO9I8sD8mJ4v75#uFH?ht-b9 z+!)YAbum|9*$)vxCBMB`OmF5mWX=0y)K$+a-U6g#7H$|L)+*F`WcG(YBElb(2U42% z*HBhHD{G08wS}zAr(@Uh$m(GyR!%$++I%+yR8{|ulkHKQiJ6rNEx4~9D)(z717~TM z2srPPG#n&!(mBz8DLgzNuE;T`K1S0$8`gd5RP|DbV<7l3O7SMZ z3jy1?<_g0TR@tCi>7Z0r%)+LvRZ>E5LbKVK0Fu<@?y9dNa#l}CP3{dM!s!|$lMq#B zUFHH;WppUQI;)o{V16pynQV&qbz@-}tQn90P4qGgg`JDGLmz-_%I@&M9GJKASb~-c zxI#b*oP_J6e}BKj^M%k^1fjhbZHODCavyHW*Fk&6A<5d|fc6W_yK($~9FLj}VKI}s za-?q5Tb{$x*IG1onY&)LZv2{I2~?4e7)Wl_HWNxS1Jn?^Lz|2~ZIa383s`opj3HA|u8z=D9a!~|Z-_fCTg)k1EgM$p}#bE&j@&;~WwM>dqkK}A$bMJdQ zbN)&ht)>Bj;ra96&#MVsC@X#EpdMP5#Vixkf7|DT{tgdFiOajn4cTwe zVeGn9nL#0$BejxE&AbF|$FHnNBDgE*tYbmIV0slrO?|84XZ!PZfxi}ADkKiNaXVb4 z1wIpF2-w7Pz~dpRUwGS^>L9t?e6;GDA5NmLvG#k6L1@8&uX0H&aRq$(j+FP-{6&7_-~RHb5CTL&mE-lHt8cxFd9Ke@qmIdDDh_TaBHS4RRglB_RoILZH0< zaAOS07ZolX%~+li$Y^-ghpu6AYAPD(#d z<2Nb^^U_oB(r-~i@;P|veTQ2IpO@pY-<~TDkO{yITWY%<8P{rQmb6a=H~M?t`7&*P zq#*WoCDl=yR-nEP6PN*S1QSXf9& z*Webh?j6%i(F}=ZnJld@j3k+_0)!4u3OaOeslHgxx~C$2Z79%~j(R_mXuNr4R2wpz zs~(_=V4Mdqa;V@R|1%^I<-tkh1UN?Jd@{>L0;Od;Jl)zsNbbu`CRarDTa4KHFW2+8n0?Oj(5KbMIJFF$6wy#1>1F>D{#fWY6G9KG=%INta zr4nuKxzQJFH8#|0<=K9MxFiSO@yNpG-I<{fcY{EJyWB$Wm-*d6{Z<~id#^jM$2j3m zIV0oE;}nP~^5N?kn+KKHrIGf+{BV~V*AY6OJo0tk;2Ae{xmM~$kBDp=Y<<>b_}D=L zzv~5MakbqZQOJwzC2G5m@WfJUtgm&G?bIfd<~V`+M^ok&&5&k&%gsfsKKInUR5wO@Nb;jhBs)flHK&7c3|&EX>Fu zE+Z}^Eg&Q;NB{%^DIZX>P*Jfw;pOBN`X_?_oBnTkBJ||{rMdeGpe6&95{M8Ha03Xb z35ci(?z#ai001E&(LX2uj}YG@A^{MR{!gh+1t1_IA|fCmA|bj*N=)=G9zr5&05R7+ z8c|geT5bayv{+aw9giBSsD`xjm-q{hAFK2rb;IJ?-xA>wX||qS|BNsZ0Eh_wcNYIg z2#JUZ0Qdf>gsJ~+PC!IROicR!FCZYKCZgdIC06~PbviM0QCMnC=a1E2JoI<-0E&N( z{?k(flmX@Cj?OMhL3@x7PEK*okhoY%l%;1INtMU z!ovF3vc*vA?u)1{td@orNgD7PQ4#zk|sk@d^=@E zN+3eQ^&#@##@hRp%8>;bUX=&3Z5rY6!FylR^c8Edj+@%nYUZZ?o*v&^WCb2%W%5(g zCglPIOuVHxpB)uxv<5>KAZIz2TU^bgzv)2)N4W;NPE88f;+1!c$yB|mdph1p<5H37 zMdNpX$kcPNGx>@eocaWguOEg-i6pl>;mZkB7wlzftC5Ki7*9!cK()5@}$&M&b zkBy2DQ1w`!=e#UHD(+_baFeAmA?^ia3*s#z}14ut)gRwH_?I4F5AxZ5|klgIMt6Ay8T|nbL>WQ(_Qr9BKqDup}of)uK z>&bu_0`_|oTJ?}?*Qo>J6Qu=~{nLbAQ83?3G>X!(%rBfYRUzuUGz>P>CzBz`RK{V| zU=kUz$Kvazh3yMB_4h_9jKd@zeZaBx<{Xb)$#P)LDKmIq6g->#BU2pkBXgKmy{)|S z>y5yItJy_JdT&&CYaf5Jtx)P6AkNWkeuO|7K{I2j2;D(S+>UXwCXc5+e%MmGw`wFm 
zUY~Rze;LM?=F(n>N}3}Y5LkjXd~leI$<;L`xNp5_$Q)-(+dG;)Vi}>dYOrVR3%)jT z9x6iy#`DfkI@P|G7#*vyDoJXI`;;Laylt7^vAm5g0Df4wh#S;4?XevO@mqf}7Ef+0`xf zbK^KcBtKqo=1M8u4s;!5nasCPT;)Eyxxu_(Q=L_K_Eb1lzx$NFv1YDxp5~siWo1zu z*;Zmx6aHnBGjbk##TrYO*M{MvoX9ToaV<$=1_jI*@AkTdc*>-C1d z#sS|g!gH*D3UoV^bfJgk_pPhfgI8pn$OUEk0A3RB^;F$MQ{d~{CY;|)2HZdTY1R@o z5hIs{p3T+fM^eIe5do-@#YT=OMW=|fkg4+{j$j!0EoR9rt%QLhEq#QX^rHbIJ?~le z?TCB4BJTpW6nMxA`kPMh3m0xY0t-XEq!5zMqLera$U2B9p;1d=H6sJ+iv!&HekyrW zz7Mz=n3r~wk@g*If9<=-o9W}~`XC~4VH`gW7n3Qo?asd)oGkVX0(wtN0}gsBr7GO% zAy!AMkR?J|87g1)4jNI*d8_-3-aa!AX#yJ}Y3hp=A4xkF_9?$F)IaGnr=-JW&=c3v zuc{es@bXZ%2$(EV&{bk2ZTCNHK3&s3Y+e_FY-U80nUgJ?>+x~H=RbK+!jG#=uD|rT zq9$KscOGTA7%wFUy14%UdZz&i^5fmeqE!C|jX>=GlXt=ifCaUI5x)*!ccY{^yPMX! zFuf$%PQk?;AqPSYOYN_yq92{aJgBtAV@Gy0mFQ(sKDm>Alz6+K?>%@Go!f(nzmkQ7 zrd@9a?Oz@!58C~X;tjAy-igExLG$(@c-YH(<6G#;?F z>5f$D{YIjWPhGL{;Nv_=HHxn^E=Flkk4La_PZ4D5QZnGOG=*A&RWw~*)Dmk3G{%bO z1c_Gi=6+3C%;rvLa^az`wKy*}gSIQ=_2=9q+0xCA_l}p^d^Tj!7;{G2g63#_tTXCz z%tZkc4jFV^OKMTg@TvIP_^MC(58LmTwkS*$Gzht|8k>6?Urw=!g}tX zX$lN6eyDnfT<4Bw!07@0lrQG2c=0`MS0Psz2U%r@ne;9ow!Oi|acVW16!{+}XVA~< zAf6_dx`Y?IOI3coE$NeX!NPV~T?^bY$!)ui-v2?Fi_0cfb1 zSr~DycVUu)R~kOt8*jc(Oo2q$+rNCOBDnZK-EHlKh-ZSKyko_XY{M|;H4>tg*sxR# zsQC27Fo_PQk|tB>WYojo{0fwoLI9C;fVJZsWI=hkpMC-fq4YxS)r%8FxUX}`kb4W# zdMc?qD5?DdhciltDQqc^MCo4Lu@2A?E43n`sSp&&gHCo{3j8Il<~-tamagwt<-DwG zcF3XqmKe3X5fx@BNWd_!eyMfPqZ@;bp( zZT}$4*Ev*g+)#YiC0iN$U!9jL-i=0WSsHE_313WG$Q}B=y4rn6H>8g{RFs3=H}k%? z@OD~0MR_Tr92M>Tfa?`LQ0F}GGk*ba`_~xW`qAQ{)sx1BgS={is3OvP5T`L@K-6RM zh`(WBI+DX;!YGs7z}o|g)-mAXstPYY}|;$1aT-%3sTLeU!P-Ym`9Y=bm3j=y$?DQaio3Khox5lJ=t&c^zA+dS60ieZ6e_Jy$Zz7(`5cW%#%4_ ziSz#>yL6uPm5fjiunGpGC<2ZP?b zs5N#rTx?qWcaI~(p)t2+VF|xY^`^vE%y~F3Jmf+$=86le6(glK^3v%n3LpzD$^HoF*&|Lr<17%COe|kw;HJVG( z&RiMO8kPxJinC&78{Q8j%B2!JZo-*6Qo{_Ycg36n>}DIw=WN?QFXq5DXZ7U9ezq<3 zP+ln&XOjiwY!tjOvWnfhNAqpCZT z+VGNWNfM8(P`D^`0uefK54xQMqP2ys7h&0>dCr+A*{1<{AHl4-)hmVi2Z1BuE%^}yQ zqa2r2|Ihj8v8o7!Y8ITU`?$s13P{%6L1{y=*h~>2v8UU{+wI1tapPR~!njTH`ST5} zhXol9=DamYu%pDM;gMfiUWoV7qYe^4yk{xCwQ#OxAefsj1v8ZFEUong>60GiHao9E zTCQk!?5^vI_KLY0Pmx*F0^I@Psjq&h&K~w~Nq<~?@O|&_kqS!6)w>TwGMUCJX1YeG z0Z`r3qjfsRN7TnCQ*bbdkSPLW?f{VRt>q<7JpLmng^Z>Pp6iMRBh)%$ls<<#l)YF~ zn9n&{wApBB4?odwTm+-&G9Cvn`~(}&P;6(HICdSw>7Nf5KX!WtOZd|}kpuPWith(U zj3BcR9~WJGOHs?4kkQuj5 z7YHa1&WCnPsj(o&e-MDk8Dlw-tO2aQV0+r9H+HXBK+!5B+^FIz#`EJ-$rdFG0@c3l zygPv8o@pE_v!jz&7X6YSKQEAju8ddgg*ytW2u0YV?Ktc;YZ}gK$*3O5F`h>3tI}`P zV-{_9Jb3FMyATuImu&h9%pGUezuUf305ZzDYKVYIwtbhEV_7@u8T$nAM9{ba9 zbI*i@&0%S>oPz0?PO@3jScHhEp;LRfy-@g$pgF5M4N}M8J6vn*$%mXd@eGUBT6}|3eN80%cw<8G znMRZAWX^qpV(-HZ69GRKf#A38=v=z zLO;~Z5QNb4!mkHdpkvvE4Z6ac8Lli=1s9I!i?UR!`9MD6Uv||5uZ2I*s@J(!5N7inJ0!xl&h_{=GIGtEddiZr=Oo6`Q zOXW4m8g-L%-SxJ}Tv!yj&Ls>xL-0iOEIqC0*JH}JszyO;wu9r95dnspIjG}L(vl)2 zLN4YMm|9SI4^b!$(hNhQlHCI@T(rV9zwKQt5^WGP;}v;91ELBNvE^ANnb(WUQqg%~ zhx<;dqA)D!yzyyma(c2zneO^Q5z+6;6D>k0R^E(R02Te3VzJCdYbkFYgK+8amRL?n_qkD&Z+WGff+JE2ooFiE)Zs>hZxg^5hGp{Y9I+y ziz|gAVrhwCBMMBczeg=L60Bs39`L(BfvB2hDgrWOD@s)n1;)q6QH5i{FB2k9%(+n4qC71pFSkTHEJ7u=QVd+XaQ|IrFE; zEGF1Ws1oRcY-e0Ff$Za1c~k7jFH{C40+kT;#-Gn~qrf@S-@6U+RtOj+*VCHQ>^k|2 z6)t;X0KWrZp*tpdqAOXfqu=hIAzF5R!1=OV@P{h;vWH!DKQ$SiX9wj+(%q<-UQ8P1 z;|@)Veg?jpZJN}tPsA@q@jgc{CsSQ^UjAtR*z^SOc2oL6_}+aIY1hBw57m1o1ID4g zLXmW%0pG?HZgCLuOr$*}m~LR< zz-<9~NvVqwy_m9tvkcJCE3?1eq&DKaOhmpkUiNBw9_(CBUc7PWio z;1k*?>M?l4k;Fuk30{cosLaVsYxOUkJGqiM-4qCm{U~_w^A1o%DO8E{UaXU1@h1YK z%V?8;Mh^#ND(>aYHg#y_7&b#zWajkq68BNp{G;wL4mplb^R)-U#?Q~soDJrYtLA+? 
zKc2NK>E#cPlq}6ZT($&HFVPkN5iIfn07Uy%{5P|O_*MKDTS-|#E6mxH;=yoY(^g=+ zi3inK8nh+ObKdo(W$k*!_s%4bViD5Z6%<||T2inqN&P`lWSSB^$7fHa9(vLGE6aZG z|8C?RETGptZ-z|IWy5Cj!4@X{zZW^JyPPWR8=fjXoagvW<&V%q<^Bq|QNXdiX%tqv z1I+GgZ4L>Igv%zYPEO8$|FpUAc4Sa?{^XlbV}7AvIIeeRm-(k0kyMO8quwS*VwXGY z^mw}~M;daXo!>|I(f%UB+{YXYGoBhuqV8KFF92^$_y|drxz^H*Dd5$6*h4q2atMzM zT{HZW{kFxqgi6s-0tXymN$bgmkY9Iz&(@g+7v^e_cYtF7!s^dtMS30^axiq|OP{Y) zO{(6G>lRqUn@RoxXWXuZ9P{5tQ5T(255cO#9HngzYsqtXw*nrvRRhGs+hdjD{7GTg zZb_F{14!d4&s%T?hZBBDt~RLsvDlM0FB<0*wFDl(Jwu(l4&yFf{ljB~+=c`W`qz8D z2bI&h@|rI`tVunVa35dFx7N!y`!Ac|Q)jHiR_6!$iX0dtj-~au3N!PyX={aV=aZN& zpU07mOnC!C6WK7Sqsw#>^*BC{UK>5{n?zySWSK5*?R#rpyx?T%3ChnmYN~k(G=smR zJ4Xt@`i4p3c|jQVKk}9H&$nt0AFaDgmA?1!#tFDnyhiX}TYl*?cAnETcA0dY{+?E* zukaGZ-kWAcs+1zg!;%)`r{wbIAJo8Rzi01H$1Y=HNVLFl<&y7n?{M|}Rh}Y(^B0U? zbeG~LsIp|fIV-R&^Ek_rhLqh&8O$>~CH^<;LwQ@9%lGK-yDT1}Ddnm+ot5qtdXIff zL+nH+ytt=o$M=>N;NEt)Ao~^wIq+Jh`-GO-Xvwi-^64WV{Q2V#-Sw#~TI*}>{`o#9 z+fSuYbx+QnkfO2!EV&G@YXo%z&k#my;ic=R8K3dzG)4`WHH#!8EPF|gOJ?3xytcup zYrVrk>XJnvSpB}Y73&t#@aD(n_*-A7n$quY?P_|bxgc)>1<)Hq-7EF^E0c#_i=X!o z$X>xnOZ_g$x6)gR+uc)6_j#3RgfLv(dh(+~&N?|Q80@*d-=bgKmod~~(K1aYX{3$b zE%ndjp*IMzDUs9|qGTm_|E>daB*vkav$s|Y!2TbeS!iOkOb=qdVQjGShl_a|!%Vxz zzO6u=ecL)LXu095TMDB5b#bp{b-nMg)>uN#ue@DS61Roi83v#=Y$i4GPMr z^x_y~(O16Q-g4oBSY%_8%3wUkp6f`JS)whcw*}EUf7Fmi2s&xdsGv6|Wg#f`;`zja zM)2{_AK1+uU~Cs0b$`=NxP z9|<(s_N`bDcK7kWeW83?aWgOIc09f+3Tk}gl-eWxSZF<~LgQB+_=y(E%zfv)U*}<+ zm6}OJdRGFgSGfOib@F^WsNG>*w_n~D1Rt^^zc3Ym^SafeUY>dr*4PC;;R&uYn3H4m#)jM8 z7`2Hu=c}cTW)V1Nk)qMltH~?BjHXS*m207^^f0f|JuYd1qeveVI-`49VN4gY;}4Pj4&JfyxdZspEs2g4xG&)=;tvU>!M(n^pV4Z~E(mDdw~FrP`~&dCU8Bz5?^Mw=C|SH>Wzt zmXdFcroOfDT4cy~jO>RH8pX)*<)Jf8owCp9%=4tD&H6%R*AFOzCp9HHyr;;tDqp3@ zadC0+Bv8Q$anG`RcmH0IqK=ws1lf|k)q{t}^{cS}u=*V!)sutEqQ=V`e5yy$x zj#6!qMBz@X2-dpvC#60^6I#Ir5pgK4<#f3iJa4Is<9GgiUy$Va-*wXEmfh1YO=rx- z{eknkxX?wW@bnEnBug>>Jf`+3+TLVG@91yVLwg}8*C#k8DY(_3eW~(Q#YXUv52n6c zQ=_twC^A16)dn>VSYt#Fi9{1hzkk%m3p z96#hxT%*DdK!~H7{Sa?jF$F25n$Fgs{2y1hU8R^~Q;^i?J%Y8)QeCOgd`}CP^DizP zx^*zb^UU+GDOqtg5>37@M?y=6dW3XJkzPDQ#^lc0bz8|i%18m_Cj>{F2gG<7P;|Ceqk0u(82eTJ$+6Vvj$JC6@57@&8dWL^4lk{lW2ST85>Ob^ksvkj2_n zI+s4jY%0nbc}`w2XtM;7z0QH8fW|j=1`T@0@dFdn3D!19=hil z(G_hZIM(}xSbufUW441$>7D$>1R~hN_KThWv%?XA_l^`Rn46qrHBjnyPW`S_jDLny z5$8q#b$aTqYz0eRZBy$DRuv1qNAoT4+h*=}qJFNHfaWsB4IwR~SrHZf3U1H7?u!|` z?jG z1iq`2{UnT)8S7#(?wVefD~K|(kvD7u=y9U=%rSgl1gY*<1?u$2#D6dyt^ zObakp|7C~R&!*dcHH30vMtJ(xNjIF0aR4I%cEjt>%|%rQ(<#!;^u4R0wlYdyic>!g zz$7Z2$vBRgJaDB~+}e!_oBKenGV?WYX=(kFPNIkJor zxM%Vz3f9@paa!$qt<|vLZP(1?&i~(cOn!#*{&GjbSz=FdxCh~Q5tFM1@6$+n0$ ziLH;pZ6(Qf0Orw~FC%$xhzxEw_D|hUdx>^TQSKc#eFx_${!dQW}VA^E^Kcej#U zgu@1*P!NkHOZFeWmx zzI~J_%pCJy0L3QxB*|UqFR)V7AFG)FU?_in8~%KPD2U>Yce10Yw^yTf%7PLu1rfe}%}G@-UeYw6XW$>aKRfor zzctMJ|9W)avXHHa^tQ*Xw}GilLK@4v{JV*AdYPOY4NUBV>oXA(O5rrZSmPvz4R!Uw z295f=K!fBU^d7w7A0;`ls}^_WI@f$CZ$$W>mxGGWPMzed^N zeLU1`o(6}4kw&Bb{VbEC)W^n;R*%(Eng$XuOC_}OYFZuv{mKYsWEuG1_Yq zyh+y)7&V4a82@hbvZ$%4k8B)(Uy_3g6w+KI6ouZKL_CfGgco~Y1x^i4B+YO6eWEB9 zD&|1GqZDC(EtRR245^etjE5C!&DMLbJZa6=HZBC(F)gUW`=5Pn5WU87)ny+olpC3I z4uw<3Sx}jg02LZIk5|kNHT3L7x21Xc_9x@>IF2|OS59v+=rmrrOS?oNelTrO==ERK zzm3@X$WmmxNs+(R3^|G#{AO77_}o!e&SShdBRqprb;AJM6c*Rwvw9XX_IAT}lep5& zPb?+aza(L9fSk?mln4*r${bFyM@9bJ={IFEtvNoC%v+J|JI-&}$@vWt%!X5cRp8)B z-eWb;MqF-b{F2!bQ_f)BD#YhUzE~*HsEZ}~n8h<*>@dq~SpV8!4SE!p5p9=G=s;|% z_$K4=R3;54?vM}lW^yL`eR-y)f#oCHuf^vZw%)F7tP!!afIsVHxV7@QiQFs8L|5yB zSOq^LN;p+Pw%Di$^{#z=0p70NR!7-r_LguAq`fyxq zXZ|OJ4x_w(0Eu_{md@3f?1!7uDmp-Q-aOmW*yQi83AX}PqhwH%9sg*tKF=P0|M88Yx(1>(vpYb8 zQb)c0USPVaOzzd2XYG*BK6;bie*i9YzH-c5O!?V9-*6V3bu4NUWFh7fA@-zA&Q{{t 
z&uN>jCxi18D*s6?Bi%7hWV2~69MYiOQsE0Gzg4TU>2W2~Gj3E^XFO-R)ZhHR(Op$8 zlqk<5tn9V=*O!=5nJjrv;(Z-+=92veGO8arEX&3}f)CEFmD;B&Bp1VKQRM4*-{^Ww zKWTK{ap(A3NTZM>N3d=DkEJZzhhhYcp8cbw!?=8+iKEj34wGueyeillM!ltg%7ByLLXa{a>vS;IxN#BCS}jOqTLG{FfxlmLtE)8+``Q{=yq> z_g%0T*Fk5FI9SYuz?rLc9;J(d7N2~0alAbQlZ@fy+*$k(w56&0T1M%4t!4h#dHmF( zLKYj^c`1XAm)M_I;m<}6`#R#)lFW5gYv~}IM5>ULMk>^?)Dfd+T($&8RW?NSeyb6X z{~{4yOnMGGE@GZIqNydf-=O7&YllKnR5VprMuSRwBZ_Or$Kj&b#tFDH0YZatQj6|mVRhR2lT@G+krF`~QW2*mPpj+N?dzM5ue!0CTt8-f zR8Y38A!U+!QdmiP`O9NIlj4Qs%}&X)s_NA-UPQCLN>XSeiXyN~(s{qed0HKkPlPd=*bLss{ zLc7+wY;ZqqvYX#ps#)!z+xd*}`6w+9x_|-s5TXFxG32Qwr_$cI$}h0+FMByXB$I*d z%=aJ(-b?4s&^XXN?6u%t^zLpxoNSa)r3oJ0+#@WRWNvOV(0m_0^$ueBn^uNE!LA!4 zLk$bGE&seB`byeOMEwP96v7k3-850ZC*AAq?{)euvG5MiW*L>MF zqGi0@Ax^k}@98m5g{PEFa&ZOn#6+EILMI~jT?Y5wF22Np4$@L&fC4pi1`Q0NuJ4)l>B&==vN;s$0wb9bTD5ZVmVEx;#*mKbO@eG!~ON=KbDenPq zLkqceH?JO86IBQxUdg>ybQVIQTAc0$&o1nijEc(e(=wIw$9Xyl%Rl<|ShVtJu?BlV z)Wx%DA-u_x5p)L_D5rEbjn~`cf~#J`_4c8B6k)qi3w3meYI>_FfA>4i!9YdO2y=^aGjHZx(&IU*PXnBr0YDhpT1Op z?2t2MC8dh7J>gEe5Z3PYBn+S|DVmE>p2%XVT99o6RRC`; zXk+ZSZeOnOb%P;P_xKPA9DDPvG%)@lFVW=ei;$C?IXzd}yuvVYy`LAv?~;`OTd{iD z5fWSvPUH6gzK5I%j800k8NiSK>cT@-uYqdyIM%PUJDd-Mfx%DXOfaVkEMsqd2T@UV)QaG?2~aIn-|;R^gW+5YDO- zAtG?CZPfW|XP&KiQ&3n70VB?rc}KK;Iespw7#FIO|1LStuua>2Ga~Byf59u;-@_vh zQc`G&eJ;JIjVwFnt?-sjob1uhd#9b9{%G^cBqbYvaANCcE>eWYS`pYF;X8nlRKk;C=71Atsl>{lc;fs3g39pA%qGt(!xjC5ot|#I{;t%w z=w|KP#MTbu}efx)1gyp_@|+qY=@QIekZKw7jq@{N;^{*HYW~ z7#wYbUGGsi_{kS(UFmgO^_s}3$S2WqJMGoZry(57v>n^VjDf7EvE9)IAp&SvZ z%$Q^N*E6Tr9c>_B@%1Yu7L`&(g^-K0_?|FmdM|#apwYq@2zC{O<95XE&HjT4aDZJlvBzKItd5*A#$ z&wSwe?d%-V;>2x|;b%k}gR))I@tn@^Fx5{MKf z!SsmN2Z@9BEN_R!-HVosAPPEupF6mKH!gyL)WvpZy7e5u82Fl!fYC~Rt_QfGp+yL* z``vGukEhFTC1+sZ`v?^0+Gp^~Moa4j38}JK54AI*k$fmUMMN}NSxoSsk*c2M2c@d> z7(jB|bJ`>x2}3Q;q>fN`(SKo?y!+QEILDZ*JW+<6zURzn6lx@!q^RgnTid*@G`Lsb z#uKwEZrSjfEKTP$spcfVAtNJN%+r-LY{TznU1=)e6MgRpdPW4=Zg20D@cSh(-Pi2} zg@jAyWHivkgH*MT+XJ4RtteYng@+Q)&W0-S+LA_Kj(4_?RCbx4ekf#MLi8CCmhk7* z@ef@VVD0T)-!Zt+rT&HkDja3RuU>pws7kYki1YZWj*zh?DHVbpD6h9aCyOutTPPd*yE(S^HLxn&UW<4AYR&8?hRV!3_o zh^UABQ)LTwA@U962zz~v?p~gu|3$kRQn+S8W-WDO5mQTggNSuO15@g8)V#bz z diMwg}v}8{4M4gIqeK+l7+vinP)e?7e{~sY^|C|5- diff --git a/examples/data_process/tutorial_fast_affine_transform.py b/examples/data_process/tutorial_fast_affine_transform.py deleted file mode 100644 index 85860e05a..000000000 --- a/examples/data_process/tutorial_fast_affine_transform.py +++ /dev/null @@ -1,139 +0,0 @@ -""" -Tutorial of fast affine transformation. -To run this tutorial, install opencv-python using pip. - -Comprehensive explanation of this tutorial can be found https://tensorlayer.readthedocs.io/en/stable/modules/prepro.html -""" - -import multiprocessing -import time - -import numpy as np - -import cv2 -import tensorflow as tf -import tensorlayer as tl - -# tl.logging.set_verbosity(tl.logging.DEBUG) -image = tl.vis.read_image('data/tiger.jpeg') -h, w, _ = image.shape - - -def create_transformation_matrix(): - # 1. 
-
-
-def create_transformation_matrix():
-    # 1. Create required affine transformation matrices
-    ## fixed
-    # M_rotate = tl.prepro.affine_rotation_matrix(angle=20)
-    # M_flip = tl.prepro.affine_horizontal_flip_matrix(prob=1)
-    # M_shift = tl.prepro.affine_shift_matrix(wrg=0.1, hrg=0, h=h, w=w)
-    # M_shear = tl.prepro.affine_shear_matrix(x_shear=0.2, y_shear=0)
-    # M_zoom = tl.prepro.affine_zoom_matrix(zoom_range=0.8)
-    ## random
-    M_rotate = tl.prepro.affine_rotation_matrix(angle=(-20, 20))
-    M_flip = tl.prepro.affine_horizontal_flip_matrix(prob=0.5)
-    M_shift = tl.prepro.affine_shift_matrix(wrg=(-0.1, 0.1), hrg=(-0.1, 0.1), h=h, w=w)
-    M_shear = tl.prepro.affine_shear_matrix(x_shear=(-0.2, 0.2), y_shear=(-0.2, 0.2))
-    M_zoom = tl.prepro.affine_zoom_matrix(zoom_range=(0.8, 1.2))
-
-    # 2. Combine matrices
-    # NOTE: operations are applied in a reversed order (i.e., rotation is performed first)
-    M_combined = M_shift.dot(M_zoom).dot(M_shear).dot(M_flip).dot(M_rotate)
-
-    # 3. Convert the matrix from Cartesian coordinates (the origin in the middle of image)
-    # to image coordinates (the origin on the top-left of image)
-    transform_matrix = tl.prepro.transform_matrix_offset_center(M_combined, x=w, y=h)
-    return transform_matrix
-
-
-def example1():
-    """ Example 1: Applying transformation one-by-one is very SLOW ! """
-    st = time.time()
-    for _ in range(100):  # Try 100 times and compute the averaged speed
-        xx = tl.prepro.rotation(image, rg=-20, is_random=False)
-        xx = tl.prepro.flip_axis(xx, axis=1, is_random=False)
-        xx = tl.prepro.shear2(xx, shear=(0., -0.2), is_random=False)
-        xx = tl.prepro.zoom(xx, zoom_range=1 / 0.8)
-        xx = tl.prepro.shift(xx, wrg=-0.1, hrg=0, is_random=False)
-    print("apply transforms one-by-one took %fs for each image" % ((time.time() - st) / 100))
-    tl.vis.save_image(xx, '_result_slow.png')
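For illustration (this snippet is not part of the patch): the speed gap between example1 and example2 comes from composing all 3x3 homogeneous affine matrices first, so the image is interpolated only once. A minimal NumPy sketch of that composition, assuming the tl.prepro matrices are plain 3x3 arrays in homogeneous coordinates, which is how the combined matrix above is used:

    import numpy as np

    def compose(*matrices):
        # Matrix product; the right-most matrix is the first operation applied.
        M = np.eye(3)
        for m in matrices:
            M = M.dot(m)
        return M

    theta = np.deg2rad(20)
    M_rotate = np.array([[np.cos(theta), -np.sin(theta), 0.],
                         [np.sin(theta),  np.cos(theta), 0.],
                         [0., 0., 1.]])
    M_shift = np.array([[1., 0., 10.],  # shift 10 pixels along x
                        [0., 1., 0.],
                        [0., 0., 1.]])
    M = compose(M_shift, M_rotate)  # same as M_shift.dot(M_rotate): rotate first, then shift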
""" - st = time.time() - for _ in range(100): # Repeat 100 times and compute the averaged speed - transform_matrix = create_transformation_matrix() - result = tl.prepro.affine_transform_cv2(image, transform_matrix, border_mode='replicate') # Transform the image using a single operation - tl.vis.save_image(result, '_result_fast_{}.png'.format(_)) - print("apply all transforms once took %fs for each image" % ((time.time() - st) / 100)) # usually 50x faster - tl.vis.save_image(result, '_result_fast.png') - - -def example3(): - """ Example 3: Using TF dataset API to load and process image for training """ - n_data = 100 - imgs_file_list = ['data/tiger.jpeg'] * n_data - train_targets = [np.ones(1)] * n_data - - def generator(): - if len(imgs_file_list) != len(train_targets): - raise RuntimeError('len(imgs_file_list) != len(train_targets)') - for _input, _target in zip(imgs_file_list, train_targets): - yield _input, _target - - def _data_aug_fn(image): - transform_matrix = create_transformation_matrix() - result = tl.prepro.affine_transform_cv2(image, transform_matrix) # Transform the image using a single operation - return result - - def _map_fn(image_path, target): - image = tf.io.read_file(image_path) - image = tf.image.decode_jpeg(image, channels=3) # Get RGB with 0~1 - image = tf.image.convert_image_dtype(image, dtype=tf.float32) - image = tf.numpy_function(_data_aug_fn, [image], [tf.float32])[0] - target = tf.reshape(target, ()) - return image, target - - n_epoch = 10 - batch_size = 5 - dataset = tf.data.Dataset.from_generator(generator, output_types=(tf.string, tf.int64)) - dataset = dataset.shuffle(buffer_size=4096) # shuffle before loading images - dataset = dataset.repeat(n_epoch) - dataset = dataset.map(_map_fn, num_parallel_calls=multiprocessing.cpu_count()) - dataset = dataset.batch(batch_size) # TODO: consider using tf.contrib.map_and_batch - dataset = dataset.prefetch(1) # prefetch 1 batch - - n_step = 0 - st = time.time() - for img, target in dataset: - n_step += 1 - pass - assert n_step == n_epoch * n_data / batch_size - print("dataset APIs took %fs for each image" % ((time.time() - st) / batch_size / n_step)) # CPU ~ 100% - - -def example4(): - """ Example 4: Transforming coordinates using affine matrix. """ - transform_matrix = create_transformation_matrix() - result = tl.prepro.affine_transform_cv2(image, transform_matrix) # 76 times faster - # Transform keypoint coordinates - coords = [[(50, 100), (100, 100), (100, 50), (200, 200)], [(250, 50), (200, 50), (200, 100)]] - coords_result = tl.prepro.affine_transform_keypoints(coords, transform_matrix) - - def imwrite(image, coords_list, name): - coords_list_ = [] - for coords in coords_list: - coords = np.array(coords, np.int32) - coords = coords.reshape((-1, 1, 2)) - coords_list_.append(coords) - image = cv2.polylines(image, coords_list_, True, (0, 255, 255), 3) - cv2.imwrite(name, image[..., ::-1]) - - imwrite(image, coords, '_with_keypoints_origin.png') - imwrite(result, coords_result, '_with_keypoints_result.png') - - -if __name__ == '__main__': - example1() - example2() - example3() - example4() diff --git a/examples/data_process/tutorial_tf_dataset_voc.py b/examples/data_process/tutorial_tf_dataset_voc.py deleted file mode 100644 index e430601b2..000000000 --- a/examples/data_process/tutorial_tf_dataset_voc.py +++ /dev/null @@ -1,113 +0,0 @@ -#! 
diff --git a/examples/data_process/tutorial_tf_dataset_voc.py b/examples/data_process/tutorial_tf_dataset_voc.py
deleted file mode 100644
index e430601b2..000000000
--- a/examples/data_process/tutorial_tf_dataset_voc.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf8 -*-
-
-# tf import data dataset.map https://www.tensorflow.org/programmers_guide/datasets#applying_arbitrary_python_logic_with_tfpy_func
-# tf.py_func https://www.tensorflow.org/api_docs/python/tf/py_func
-# tl ref: https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_imagenet_inceptionV3_distributed.py
-# cn ref: https://blog.csdn.net/dQCFKyQDXYm3F8rB0/article/details/79342369
-# cn ref: https://zhuanlan.zhihu.com/p/31466173
-
-import json
-import multiprocessing
-import random
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-
-# tf.logging.set_verbosity(tf.logging.DEBUG)
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-imgs_file_list, _, _, _, classes, _, _, _, objs_info_list, _ = tl.files.load_voc_dataset(dataset="2007")
-
-ann_list = []
-for info in objs_info_list:
-    ann = tl.prepro.parse_darknet_ann_str_to_list(info)
-    c, b = tl.prepro.parse_darknet_ann_list_to_cls_box(ann)
-    ann_list.append([c, b])
-
-n_epoch = 10
-batch_size = 64
-im_size = [416, 416]
-jitter = 0.2
-shuffle_buffer_size = 100
-
-
-def generator():
-    inputs = imgs_file_list
-    targets = objs_info_list
-
-    if len(inputs) != len(targets):
-        raise AssertionError("The length of inputs and targets should be equal")
-
-    for _input, _target in zip(inputs, targets):
-        yield _input.encode('utf-8'), _target.encode('utf-8')
-
-
-def _data_aug_fn(im, ann):
-    ## parse annotation
-    ann = ann.decode()
-    ann = tl.prepro.parse_darknet_ann_str_to_list(ann)
-    clas, coords = tl.prepro.parse_darknet_ann_list_to_cls_box(ann)
-    ## random brightness, contrast and saturation (tf.image API is faster)
-    # im = tl.prepro.brightness(im, gamma=0.5, gain=1, is_random=True)
-    # im = tl.prepro.illumination(im, gamma=(0.5, 1.5),
-    #     contrast=(0.5, 1.5), saturation=(0.5, 1.5), is_random=True)  # TypeError: Cannot handle this data type
-    ## random horizontal flip
-    im, coords = tl.prepro.obj_box_left_right_flip(im, coords, is_rescale=True, is_center=True, is_random=True)
-    ## random resize and crop
-    tmp0 = random.randint(1, int(im_size[0] * jitter))
-    tmp1 = random.randint(1, int(im_size[1] * jitter))
-    im, coords = tl.prepro.obj_box_imresize(im, coords, [im_size[0] + tmp0, im_size[1] + tmp1], \
-        is_rescale=True, interp='bicubic')
-    im, clas, coords = tl.prepro.obj_box_crop(im, clas, coords, wrg=im_size[1], hrg=im_size[0], \
-        is_rescale=True, is_center=True, is_random=True)
-    ## value [0, 255] to [-1, 1] (optional)
-    # im = im / 127.5 - 1
-    ## value [0, 255] to [0, 1] (optional)
-    im = im / 255
-    im = np.array(im, dtype=np.float32)  # important
-    return im, str([clas, coords]).encode('utf-8')
-
-
-def _map_fn(filename, annotation):
-    ## read image
-    image = tf.io.read_file(filename)
-    image = tf.image.decode_jpeg(image, channels=3)
-    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
-    ## data augmentation for image only 0.02s
-    image = tf.image.random_brightness(image, max_delta=63)
-    image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
-    # subtract off the mean and divide by the variance of the pixels. (optional)
-    # img = tf.image.per_image_standardization(img)
-    ## data augmentation for image and bounding box
-    image, annotation = tf.numpy_function(_data_aug_fn, [image, annotation], [tf.float32, tf.string])
-    return image, annotation
-
-
-ds = tf.data.Dataset.from_generator(generator, output_types=(tf.string, tf.string))
-ds = ds.shuffle(shuffle_buffer_size)
-ds = ds.map(_map_fn, num_parallel_calls=multiprocessing.cpu_count())
-ds = ds.repeat(n_epoch)
-ds = ds.prefetch(buffer_size=2048)
-ds = ds.batch(batch_size)
-
-st = time.time()
-im, annbyte = next(iter(ds))
-print('took {}s'.format(time.time() - st))
-
-im = im.numpy()
-
-ann = []
-for a in annbyte:
-    a = a.numpy().decode()
-    ann.append(json.loads(a))
-
-## save all images
-for i in range(len(im)):
-    print(ann[i][1])
-    tl.vis.draw_boxes_and_labels_to_image(
-        im[i] * 255, ann[i][0], ann[i][1], [], classes, True, save_name='_bbox_vis_%d.png' % i
-    )
diff --git a/examples/data_process/tutorial_tfrecord.py b/examples/data_process/tutorial_tfrecord.py
deleted file mode 100644
index bd656a960..000000000
--- a/examples/data_process/tutorial_tfrecord.py
+++ /dev/null
@@ -1,98 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""You will learn.
-
-1. How to save data into TFRecord format file.
-2. How to read data from TFRecord format file.
-
-Reference:
------------
-English : https://www.tensorflow.org/alpha/tutorials/load_data/images#build_a_tfdatadataset
-          https://www.tensorflow.org/alpha/tutorials/load_data/tf_records#tfrecord_files_using_tfdata
-Chinese : http://blog.csdn.net/u012759136/article/details/52232266
-          https://github.com/ycszen/tf_lab/blob/master/reading_data/TensorFlow高效加载数据的方法.md
-
-More
-------
-1. tutorial_tfrecord2.py
-2. tutorial_cifar10_tfrecord.py
-
-"""
-
-import os
-
-import numpy as np
-import tensorflow as tf
-from PIL import Image
-
-import tensorlayer as tl
-
-## Save data ==================================================================
-# see https://www.tensorflow.org/alpha/tutorials/load_data/tf_records#writing_a_tfrecord_file
-classes = ['/data/cat', '/data/dog']  # cat is 0, dog is 1
-cwd = os.getcwd()
-writer = tf.io.TFRecordWriter("train.tfrecords")
-for index, name in enumerate(classes):
-    class_path = cwd + name + "/"
-    for img_name in os.listdir(class_path):
-        img_path = class_path + img_name
-        img = Image.open(img_path)
-        img = img.resize((224, 224))
-        ## Visualize the image as follows:
-        # tl.visualize.frame(I=img, second=5, saveable=False, name='frame', fig_idx=12836)
-        ## Convert an image to bytes
-        img_raw = img.tobytes()
-        ## Convert the bytes back to an image as follows:
-        # image = Image.frombytes('RGB', (224,224), img_raw)
-        # tl.visualize.frame(I=image, second=1, saveable=False, name='frame', fig_idx=1236)
-        ## Write the data into TF format
-        # image    : Feature + BytesList
-        # label    : Feature + Int64List or FloatList
-        # sentence : FeatureList + Int64List , see Google's im2txt example
-        example = tf.train.Example(features=tf.train.Features(feature={  # SequenceExample for sequence example
-            "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[index])),
-            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
-        }))
-        writer.write(example.SerializeToString())  # Serialize To String
-writer.close()
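For illustration (not part of the deleted file): a quick way to verify the feature layout described in the comments above (image as BytesList, label as Int64List) is to serialize one Example and parse it straight back. A minimal sketch, assuming TensorFlow 2.x eager mode:

    import numpy as np
    import tensorflow as tf

    fake_img = np.zeros((224, 224, 3), np.uint8).tobytes()
    example = tf.train.Example(features=tf.train.Features(feature={
        "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[1])),
        "img_raw": tf.train.Feature(bytes_list=tf.train.BytesList(value=[fake_img])),
    }))
    parsed = tf.io.parse_single_example(example.SerializeToString(), {
        "label": tf.io.FixedLenFeature([], tf.int64),
        "img_raw": tf.io.FixedLenFeature([], tf.string),
    })
    assert int(parsed["label"]) == 1  # the round trip preserves the label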
-
-## Load Data Method 1: Simple read ============================================
-# see https://www.tensorflow.org/alpha/tutorials/load_data/tf_records#reading_a_tfrecord_file_2
-# read data one by one in order
-raw_dataset = tf.data.TFRecordDataset("train.tfrecords")
-for serialized_example in raw_dataset:
-    example = tf.train.Example()  # SequenceExample for sequence example
-    example.ParseFromString(serialized_example.numpy())
-    img_raw = example.features.feature['img_raw'].bytes_list.value
-    label = example.features.feature['label'].int64_list.value
-    ## convert an image from bytes
-    image = Image.frombytes('RGB', (224, 224), img_raw[0])
-    # tl.visualize.frame(np.asarray(image), second=0.5, saveable=False, name='frame', fig_idx=1283)
-    print(label)
-
-
-## Read Data Method 2: using tf.data =======================================
-# see https://www.tensorflow.org/alpha/tutorials/load_data/tf_records#reading_a_tfrecord_file
-# use shuffle and batch
-def read_and_decode(filename):
-    # build a dataset from the given file name
-    raw_dataset = tf.data.TFRecordDataset([filename]).shuffle(1000).batch(4)
-    for serialized_example in raw_dataset:
-        features = tf.io.parse_example(
-            serialized_example, features={
-                'label': tf.io.FixedLenFeature([], tf.int64),
-                'img_raw': tf.io.FixedLenFeature([], tf.string),
-            }
-        )
-        # You can do more image distortion here for training data
-        img_batch = tf.io.decode_raw(features['img_raw'], tf.uint8)
-        img_batch = tf.reshape(img_batch, [4, 224, 224, 3])
-        # img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
-        label_batch = tf.cast(features['label'], tf.int32)
-        yield img_batch, label_batch
-
-
-img_batch, label_batch = next(read_and_decode("train.tfrecords"))
-print("img_batch   : %s" % img_batch.shape)
-print("label_batch : %s" % label_batch.shape)
-tl.visualize.images2d(img_batch, second=1, saveable=False, name='batch', dtype=None, fig_idx=2020121)
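For illustration (not part of the deleted file): the generator above parses records eagerly in Python; in TF 2.x the more idiomatic form keeps parsing inside the input pipeline with dataset.map. A sketch using the same feature spec:

    import tensorflow as tf

    def parse_fn(serialized):
        features = tf.io.parse_single_example(serialized, {
            'label': tf.io.FixedLenFeature([], tf.int64),
            'img_raw': tf.io.FixedLenFeature([], tf.string),
        })
        img = tf.reshape(tf.io.decode_raw(features['img_raw'], tf.uint8), [224, 224, 3])
        return img, tf.cast(features['label'], tf.int32)

    ds = tf.data.TFRecordDataset('train.tfrecords').map(parse_fn).shuffle(1000).batch(4)
    # for img_batch, label_batch in ds: ...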
diff --git a/examples/data_process/tutorial_tfrecord2.py b/examples/data_process/tutorial_tfrecord2.py
deleted file mode 100755
index 163d2a64f..000000000
--- a/examples/data_process/tutorial_tfrecord2.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""You will learn.
-
-1. How to convert CIFAR-10 dataset into TFRecord format file.
-2. How to read CIFAR-10 from TFRecord format file.
-
-More:
-1. tutorial_tfrecord.py
-2. tutorial_cifar10_tfrecord.py
-
-"""
-
-import os
-
-import numpy as np
-# import matplotlib
-# matplotlib.use('GTK')
-import tensorflow as tf
-
-import tensorlayer as tl
-
-# Download data, and convert to TFRecord format, see ```tutorial_tfrecord.py```
-X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
-
-X_train = np.asarray(X_train, dtype=np.uint8)
-y_train = np.asarray(y_train, dtype=np.int64)
-X_test = np.asarray(X_test, dtype=np.float32)
-y_test = np.asarray(y_test, dtype=np.int64)
-
-print('X_train.shape', X_train.shape)  # (50000, 32, 32, 3)
-print('y_train.shape', y_train.shape)  # (50000,)
-print('X_test.shape', X_test.shape)  # (10000, 32, 32, 3)
-print('y_test.shape', y_test.shape)  # (10000,)
-print('X %s   y %s' % (X_test.dtype, y_test.dtype))
-
-cwd = os.getcwd()
-writer = tf.io.TFRecordWriter("train.cifar10")
-for index, img in enumerate(X_train):
-    img_raw = img.tobytes()
-    ## Visualize an image
-    # tl.visualize.frame(np.asarray(img, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)
-    label = int(y_train[index])
-    # print(label)
-    ## Convert the bytes back to an image as follows:
-    # image = Image.frombytes('RGB', (32, 32), img_raw)
-    # image = np.fromstring(img_raw, np.float32)
-    # image = image.reshape([32, 32, 3])
-    # tl.visualize.frame(np.asarray(image, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)
-    example = tf.train.Example(
-        features=tf.train.Features(
-            feature={
-                "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
-                'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
-            }
-        )
-    )
-    writer.write(example.SerializeToString())  # Serialize To String
-writer.close()
-
-
-## Read Data by Queue and Thread =======================================
-def read_and_decode(filename):
-    batchsize = 4
-    raw_dataset = tf.data.TFRecordDataset([filename]).shuffle(1000).batch(batchsize)
-    for serialized_example in raw_dataset:
-        features = tf.io.parse_example(
-            serialized_example, features={
-                'label': tf.io.FixedLenFeature([], tf.int64),
-                'img_raw': tf.io.FixedLenFeature([], tf.string),
-            }
-        )
-        # You can do more image distortion here for training data
-        img_batch = tf.io.decode_raw(features['img_raw'], tf.uint8)
-        img_batch = tf.reshape(img_batch, [-1, 32, 32, 3])
-        # img = tf.cast(img, tf.float32)  # * (1. / 255) - 0.5  # don't need to cast here, as it is float32 already
-        label_batch = tf.cast(features['label'], tf.int32)
-        yield img_batch, label_batch
-
-
-img_batch, label_batch = next(read_and_decode("train.cifar10"))
-print("img_batch   : %s" % img_batch.shape)
-print("label_batch : %s" % label_batch.shape)
-
-i = 0
-for img_batch, label_batch in read_and_decode("train.cifar10"):
-    tl.visualize.images2d(img_batch, second=1, saveable=False, name='batch' + str(i), dtype=np.uint8, fig_idx=2020121)
-    i += 1
-    if i >= 3:
-        break
diff --git a/examples/data_process/tutorial_tfrecord3.py b/examples/data_process/tutorial_tfrecord3.py
deleted file mode 100644
index 9e5751a25..000000000
--- a/examples/data_process/tutorial_tfrecord3.py
+++ /dev/null
@@ -1,464 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""
-You will learn.
-
-1. How to save time-series data (e.g. sentence) into TFRecord format file.
-2. How to read time-series data from TFRecord format file.
-3. How to create inputs, targets and mask.
-
-Reference
-----------
-1. Google's im2txt - MSCOCO Image Captioning example
-2.
TFRecord in http://www.wildml.com/2016/08/rnns-in-tensorflow-a-practical-guide-and-undocumented-features/ -3. Batching and Padding data in http://www.wildml.com/2016/08/rnns-in-tensorflow-a-practical-guide-and-undocumented-features/ - -""" - -import json -import os - -import numpy as np -import tensorflow as tf -from PIL import Image - -import tensorlayer as tl - - -def _int64_feature(value): - """Wrapper for inserting an int64 Feature into a SequenceExample proto, - e.g, An integer label. - """ - return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) - - -def _bytes_feature(value): - """Wrapper for inserting a bytes Feature into a SequenceExample proto, - e.g, an image in byte - """ - # return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)])) - return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) - - -def _int64_feature_list(values): - """Wrapper for inserting an int64 FeatureList into a SequenceExample proto, - e.g, sentence in list of ints - """ - return tf.train.FeatureList(feature=[_int64_feature(v) for v in values]) - - -def _bytes_feature_list(values): - """Wrapper for inserting a bytes FeatureList into a SequenceExample proto, - e.g, sentence in list of bytes - """ - return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values]) - - -# 1. Save data into TFRecord ===================================================== -cwd = os.getcwd() -IMG_DIR = cwd + '/data/cat/' -SEQ_FIR = cwd + '/data/cat_caption.json' -VOC_FIR = cwd + '/vocab.txt' -# read image captions from JSON -with tf.gfile.FastGFile(SEQ_FIR, "r") as f: - caption_data = json.loads(str(f.read())) # , encoding = "utf-8")) - -processed_capts, img_capts = [], [] -for idx in range(len(caption_data['images'])): - img_capt = caption_data['images'][idx]['caption'] - img_capts.append(img_capt) - processed_capts.append(tl.nlp.process_sentence(img_capt, start_word="", end_word="")) -print("Original Captions: %s" % img_capts) -print("Processed Captions: %s\n" % processed_capts) -# build vocab -_ = tl.nlp.create_vocab(processed_capts, word_counts_output_file=VOC_FIR, min_word_count=1) -vocab = tl.nlp.Vocabulary(VOC_FIR, start_word="", end_word="", unk_word="") - -# save -writer = tf.python_io.TFRecordWriter("train.cat_caption") -for idx in range(len(caption_data['images'])): - # get data - img_name = caption_data['images'][idx]['file_name'] - img_capt = ' ' + caption_data['images'][idx]['caption'] + ' ' - img_capt_ids = [vocab.word_to_id(word) for word in img_capt.split(' ')] - print("%s : %s : %s" % (img_name, img_capt, img_capt_ids)) - img = Image.open(IMG_DIR + img_name) - img = img.resize((299, 299)) - # tl.visualize.frame(I=img, second=0.2, saveable=False, name=img_name, fig_idx=12234) - img_raw = img.tobytes() - img_capt_b = [v.encode() for v in img_capt.split(' ')] - context = tf.train.Features(feature={ # Non-serial data uses Feature - "image/img_raw": _bytes_feature(img_raw), - }) - feature_lists = tf.train.FeatureLists( - feature_list={ # Serial data uses FeatureLists - "image/caption": _bytes_feature_list(img_capt_b), - "image/caption_ids": _int64_feature_list(img_capt_ids) - }) - sequence_example = tf.train.SequenceExample(context=context, feature_lists=feature_lists) - writer.write(sequence_example.SerializeToString()) # Serialize To String -writer.close() - -# 2. 
Simple read one image ======================================================= -filename_queue = tf.train.string_input_producer(["train.cat_caption"]) -reader = tf.TFRecordReader() -_, serialized_example = reader.read(filename_queue) # return the file and the name of file -# features, sequence_features = tf.parse_single_example(serialized_example, # see parse_single_sequence_example for sequence example -features, sequence_features = tf.parse_single_sequence_example( - serialized_example, context_features={ - 'image/img_raw': tf.FixedLenFeature([], tf.string), - }, sequence_features={ - "image/caption": tf.FixedLenSequenceFeature([], dtype=tf.string), - "image/caption_ids": tf.FixedLenSequenceFeature([], dtype=tf.int64), - } -) -c = tf.contrib.learn.run_n(features, n=1, feed_dict=None) -im = Image.frombytes('RGB', (299, 299), c[0]['image/img_raw']) -tl.visualize.frame(np.asarray(im), second=1, saveable=False, name='frame', fig_idx=1236) -c = tf.contrib.learn.run_n(sequence_features, n=1, feed_dict=None) -print(c[0]) - - -# 3. Prefetch serialized SequenceExample protos ================================== -def distort_image(image, thread_id): - """Perform random distortions on an image. - Args: - image: A float32 Tensor of shape [height, width, 3] with values in [0, 1). - thread_id: Preprocessing thread id used to select the ordering of color - distortions. There should be a multiple of 2 preprocessing threads. - Returns:```` - distorted_image: A float32 Tensor of shape [height, width, 3] with values in - [0, 1]. - """ - # Randomly flip horizontally. - with tf.name_scope("flip_horizontal"): # , values=[image]): # DH MOdify - # with tf.name_scope("flip_horizontal", values=[image]): - image = tf.image.random_flip_left_right(image) - # Randomly distort the colors based on thread id. - color_ordering = thread_id % 2 - with tf.name_scope("distort_color"): # , values=[image]): # DH MOdify - # with tf.name_scope("distort_color", values=[image]): # DH MOdify - if color_ordering == 0: - image = tf.image.random_brightness(image, max_delta=32. / 255.) - image = tf.image.random_saturation(image, lower=0.5, upper=1.5) - image = tf.image.random_hue(image, max_delta=0.032) - image = tf.image.random_contrast(image, lower=0.5, upper=1.5) - elif color_ordering == 1: - image = tf.image.random_brightness(image, max_delta=32. / 255.) - image = tf.image.random_contrast(image, lower=0.5, upper=1.5) - image = tf.image.random_saturation(image, lower=0.5, upper=1.5) - image = tf.image.random_hue(image, max_delta=0.032) - # The random_* ops do not necessarily clamp. - image = tf.clip_by_value(image, 0.0, 1.0) - - return image - - -# def process_image(encoded_image, -# is_training, -# height, -# width, -# resize_height=346, -# resize_width=346, -# thread_id=0, -# image_format="jpeg"): -# """Decode an image, resize and apply random distortions. -# In training, images are distorted slightly differently depending on thread_id. -# Args: -# encoded_image: String Tensor containing the image. -# is_training: Boolean; whether preprocessing for training or eval. -# height: Height of the output image. -# width: Width of the output image. -# resize_height: If > 0, resize height before crop to final dimensions. -# resize_width: If > 0, resize width before crop to final dimensions. -# thread_id: Preprocessing thread id used to select the ordering of color -# distortions. There should be a multiple of 2 preprocessing threads. -# image_format: "jpeg" or "png". 
-# Returns: -# A float32 Tensor of shape [height, width, 3] with values in [-1, 1]. -# Raises: -# ValueError: If image_format is invalid. -# """ -# # Helper function to log an image summary to the visualizer. Summaries are -# # only logged in thread 0. -# def image_summary(name, image): -# if not thread_id: -# tf.image_summary(name, tf.expand_dims(image, 0)) -# -# # Decode image into a float32 Tensor of shape [?, ?, 3] with values in [0, 1). -# with tf.name_scope("decode"):#, values=[encoded_image]): # DH modify -# # with tf.name_scope("decode", values=[encoded_image]): # DH modify -# if image_format == "jpeg": -# image = tf.image.decode_jpeg(encoded_image, channels=3) -# elif image_format == "png": -# image = tf.image.decode_png(encoded_image, channels=3) -# else: -# raise ValueError("Invalid image format: %s" % image_format) -# image = tf.image.convert_image_dtype(image, dtype=tf.float32) -# image_summary("original_image", image) -# -# # Resize image. -# assert (resize_height > 0) == (resize_width > 0) -# if resize_height: -# # image = tf.image.resize_images(image, -# # size=[resize_height, resize_width], -# # method=tf.image.ResizeMethod.BILINEAR) -# -# image = tf.image.resize_images(image, # DH Modify -# new_height=resize_height, -# new_width=resize_width, -# method=tf.image.ResizeMethod.BILINEAR) -# -# # Crop to final dimensions. -# if is_training: -# image = tf.random_crop(image, [height, width, 3]) -# else: -# # Central crop, assuming resize_height > height, resize_width > width. -# image = tf.image.resize_image_with_crop_or_pad(image, height, width) -# -# image_summary("resized_image", image) -# -# # Randomly distort the image. -# if is_training: -# image = distort_image(image, thread_id) -# -# image_summary("final_image", image) -# -# # Rescale to [-1,1] instead of [0, 1] -# image = tf.subtract(image, 0.5) -# image = tf.multiply(image, 2.0) -# return image - - -def prefetch_input_data( - reader, file_pattern, is_training, batch_size, values_per_shard, input_queue_capacity_factor=16, - num_reader_threads=1, shard_queue_name="filename_queue", value_queue_name="input_queue" -): - """Prefetches string values from disk into an input queue. - - In training the capacity of the queue is important because a larger queue - means better mixing of training examples between shards. The minimum number of - values kept in the queue is values_per_shard * input_queue_capacity_factor, - where input_queue_memory factor should be chosen to trade-off better mixing - with memory usage. - - Args: - reader: Instance of tf.ReaderBase. - file_pattern: Comma-separated list of file patterns (e.g. - /tmp/train_data-?????-of-00100). - is_training: Boolean; whether prefetching for training or eval. - batch_size: Model batch size used to determine queue capacity. - values_per_shard: Approximate number of values per shard. - input_queue_capacity_factor: Minimum number of values to keep in the queue - in multiples of values_per_shard. See comments above. - num_reader_threads: Number of reader threads to fill the queue. - shard_queue_name: Name for the shards filename queue. - value_queue_name: Name for the values input queue. - - Returns: - A Queue containing prefetched string values. 
- """ - data_files = [] - for pattern in file_pattern.split(","): - data_files.extend(tf.gfile.Glob(pattern)) - if not data_files: - tl.logging.fatal("Found no input files matching %s", file_pattern) - else: - tl.logging.info("Prefetching values from %d files matching %s", len(data_files), file_pattern) - - if is_training: - print(" is_training == True : RandomShuffleQueue") - filename_queue = tf.train.string_input_producer(data_files, shuffle=True, capacity=16, name=shard_queue_name) - min_queue_examples = values_per_shard * input_queue_capacity_factor - capacity = min_queue_examples + 100 * batch_size - values_queue = tf.RandomShuffleQueue( - capacity=capacity, min_after_dequeue=min_queue_examples, dtypes=[tf.string], - name="random_" + value_queue_name - ) - else: - print(" is_training == False : FIFOQueue") - filename_queue = tf.train.string_input_producer(data_files, shuffle=False, capacity=1, name=shard_queue_name) - capacity = values_per_shard + 3 * batch_size - values_queue = tf.FIFOQueue(capacity=capacity, dtypes=[tf.string], name="fifo_" + value_queue_name) - - enqueue_ops = [] - for _ in range(num_reader_threads): - _, value = reader.read(filename_queue) - enqueue_ops.append(values_queue.enqueue([value])) - tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(values_queue, enqueue_ops)) - - tf.summary.scalar( - "queue/%s/fraction_of_%d_full" % (values_queue.name, capacity), - tf.cast(values_queue.size(), tf.float32) * (1. / capacity) - ) - - return values_queue - - -is_training = True -resize_height = resize_width = 346 -height = width = 299 -# start to read -reader = tf.TFRecordReader() -input_queue = prefetch_input_data( - reader, - file_pattern="train.cat_caption", # sets train.???_caption to read many files - is_training=is_training, # if training, shuffle and random choice - batch_size=4, - values_per_shard=2300, # mixing between shards in training. - input_queue_capacity_factor=2, # minimum number of shards to keep in the input queue. - num_reader_threads=1 # number of threads for prefetching SequenceExample protos. -) -serialized_sequence_example = input_queue.dequeue() -# serialized_sequence_example = tf.train.string_input_producer(["train.cat_caption"]) # don't work -context, sequence = tf.parse_single_sequence_example( - serialized=serialized_sequence_example, context_features={"image/img_raw": tf.FixedLenFeature([], dtype=tf.string)}, - sequence_features={ - "image/caption": tf.FixedLenSequenceFeature([], dtype=tf.string), - "image/caption_ids": tf.FixedLenSequenceFeature([], dtype=tf.int64), - } -) - -img = tf.decode_raw(context["image/img_raw"], tf.uint8) -img = tf.reshape(img, [height, width, 3]) -img = tf.image.convert_image_dtype(img, dtype=tf.float32) - -try: - # for TensorFlow 0.11 - img = tf.image.resize_images(img, size=(resize_height, resize_width), method=tf.image.ResizeMethod.BILINEAR) -except Exception: - # for TensorFlow 0.10 - img = tf.image.resize_images( - img, new_height=resize_height, new_width=resize_width, method=tf.image.ResizeMethod.BILINEAR - ) -# Crop to final dimensions. -if is_training: - img = tf.random_crop(img, [height, width, 3]) -else: - # Central crop, assuming resize_height > height, resize_width > width. - img = tf.image.resize_image_with_crop_or_pad(img, height, width) -# Randomly distort the image. 
-if is_training: - img = distort_image(img, thread_id=0) -# Rescale to [-1, 1] instead of [0, 1] -img = tf.subtract(img, 0.5) -img = tf.multiply(img, 2.0) -img_cap = sequence["image/caption"] -img_cap_ids = sequence["image/caption_ids"] -img_batch, img_cap_batch, img_cap_ids_batch = tf.train.batch( - [img, img_cap, img_cap_ids], # Note: shuffle_batch doesn't support dynamic_pad - batch_size=4, - capacity=50000, - dynamic_pad=True, # string list pad with '', int list pad with 0 - num_threads=4 -) -sess = tf.Session() -# sess.run(tf.global_variables_initializer()) -tl.layers.initialize_global_variables(sess) -coord = tf.train.Coordinator() -threads = tf.train.start_queue_runners(sess=sess, coord=coord) -for _ in range(3): - print("Step %s" % _) - # print(sess.run([img, img_cap, img_cap_ids])) # one example only - imgs, caps, caps_id = sess.run([img_batch, img_cap_batch, img_cap_ids_batch]) # batch of examples with dynamic_pad - print(caps) - print(caps_id) - tl.visualize.images2d((imgs + 1) / 2, second=1, saveable=False, name='batch', dtype=None, fig_idx=202025) -coord.request_stop() -coord.join(threads) -sess.close() - - -# 4. Prefetch serialized SequenceExample protos. Create MASK and TARGET ======= -def batch_with_dynamic_pad(images_and_captions, batch_size, queue_capacity, add_summaries=True): - """Batches input images and captions. - - This function splits the caption into an input sequence and a target sequence, - where the target sequence is the input sequence right-shifted by 1. Input and - target sequences are batched and padded up to the maximum length of sequences - in the batch. A mask is created to distinguish real words from padding words. - - Example: - Actual captions in the batch ('-' denotes padded character): - [ - [ 1 2 5 4 5 ], - [ 1 2 3 4 - ], - [ 1 2 3 - - ], - ] - - input_seqs: - [ - [ 1 2 3 4 ], - [ 1 2 3 - ], - [ 1 2 - - ], - ] - - target_seqs: - [ - [ 2 3 4 5 ], - [ 2 3 4 - ], - [ 2 3 - - ], - ] - - mask: - [ - [ 1 1 1 1 ], - [ 1 1 1 0 ], - [ 1 1 0 0 ], - ] - - Args: - images_and_captions: A list of pairs [image, caption], where image is a - Tensor of shape [height, width, channels] and caption is a 1-D Tensor of - any length. Each pair will be processed and added to the queue in a - separate thread. - batch_size: Batch size. - queue_capacity: Queue capacity. - add_summaries: If true, add caption length summaries. - - Returns: - images: A Tensor of shape [batch_size, height, width, channels]. - input_seqs: An int32 Tensor of shape [batch_size, padded_length]. - target_seqs: An int32 Tensor of shape [batch_size, padded_length]. - mask: An int32 0/1 Tensor of shape [batch_size, padded_length]. 
-    """
-    enqueue_list = []
-    for image, caption in images_and_captions:
-        caption_length = tf.shape(caption)[0]
-        input_length = tf.expand_dims(tf.subtract(caption_length, 1), 0)
-
-        input_seq = tf.slice(caption, [0], input_length)
-        target_seq = tf.slice(caption, [1], input_length)
-        indicator = tf.ones(input_length, dtype=tf.int32)
-        enqueue_list.append([image, input_seq, target_seq, indicator])
-
-    images, input_seqs, target_seqs, mask = tf.train.batch_join(
-        enqueue_list, batch_size=batch_size, capacity=queue_capacity, dynamic_pad=True, name="batch_and_pad"
-    )
-
-    if add_summaries:
-        lengths = tf.add(tf.reduce_sum(mask, 1), 1)
-        tf.summary.scalar("caption_length/batch_min", tf.reduce_min(lengths))
-        tf.summary.scalar("caption_length/batch_max", tf.reduce_max(lengths))
-        tf.summary.scalar("caption_length/batch_mean", tf.reduce_mean(lengths))
-
-    return images, input_seqs, target_seqs, mask
-
-
-images, input_seqs, target_seqs, input_mask = (
-    batch_with_dynamic_pad(images_and_captions=[[img, img_cap]], batch_size=4, queue_capacity=50000)
-)
-sess = tf.Session()
-sess.run(tf.global_variables_initializer())
-coord = tf.train.Coordinator()
-threads = tf.train.start_queue_runners(sess=sess, coord=coord)
-for _ in range(3):
-    print("Step %s" % _)
-    imgs, inputs, targets, masks = sess.run([images, input_seqs, target_seqs, input_mask])
-    print(inputs)
-    print(targets)
-    print(masks)
-    tl.visualize.images2d((imgs + 1) / 2, second=1, saveable=False, name='batch', dtype=None, fig_idx=202025)
-coord.request_stop()
-coord.join(threads)
-sess.close()
diff --git a/examples/database/README.md b/examples/database/README.md
deleted file mode 100644
index 0636abc7b..000000000
--- a/examples/database/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Dispatch Tasks
-
-1. This script (`dispatch_tasks.py`) creates 3 tasks (`task_script.py`) with different hyper-parameters and a dataset and pushes these tasks into the database.
-2. On your GPU servers (for testing, it can be a new terminal on your local machine), run tasks as shown in `run_tasks.py`.
-This script pulls and runs pending tasks, and saves the models and results to the database.
-3. When all tasks complete, the dispatcher (`dispatch_tasks.py`) then selects the best model according to its accuracy.
-
-
-# Save and load models
-
-- `task_script.py` shows how to save a model.
-- `dispatch_tasks.py` shows how to find and load the model with the best testing accuracy.
-
-# Save and load datasets
-
-- `dispatch_tasks.py` shows how to save a dataset.
-- `task_script.py` shows how to find and load a dataset.
-
-#### More information in the online documentation.
\ No newline at end of file
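For illustration (not part of the patch): condensed, the intended workflow of the three scripts below looks roughly like this. The sketch only uses calls that appear in the scripts and assumes a MongoDB instance reachable on localhost:

    import tensorlayer as tl

    db = tl.db.TensorHub(ip='localhost', port=27017, dbname='temp', project_name='tutorial')

    # dispatcher: push one task
    db.create_task(task_name='mnist', script='task_script.py',
                   hyper_parameters=dict(n_units1=800, n_units2=800),
                   saved_result_keys=['test_accuracy'], description='800-800')

    # runner (any machine that can reach the database): execute one pending task
    db.run_top_task(task_name='mnist', sort=[("time", -1)])

    # dispatcher: pick the best model once all tasks have finished
    best = db.find_top_model(model_name='mlp', sort=[("test_accuracy", -1)])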
diff --git a/examples/database/dispatch_tasks.py b/examples/database/dispatch_tasks.py
deleted file mode 100644
index 4c8c02e44..000000000
--- a/examples/database/dispatch_tasks.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""
-A sample script that shows how to distribute multiple tasks to multiple machines
-using the database module.
-
-"""
-import time
-
-import tensorflow as tf
-
-import tensorlayer as tl
-
-tl.logging.set_verbosity(tl.logging.DEBUG)
-# tf.logging.set_verbosity(tf.logging.DEBUG)
-
-# connect to database
-db = tl.db.TensorHub(ip='localhost', port=27017, dbname='temp', project_name='tutorial')
-
-# delete existing tasks, models and datasets in this project
-db.delete_tasks()
-db.delete_model()
-db.delete_datasets()
-
-# save dataset into database, then allow other servers to use it
-X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
-db.save_dataset((X_train, y_train, X_val, y_val, X_test, y_test), 'mnist', description='handwriting digit')
-
-# push tasks into the database, then allow other servers to pull and run them
-db.create_task(
-    task_name='mnist', script='task_script.py', hyper_parameters=dict(n_units1=800, n_units2=800),
-    saved_result_keys=['test_accuracy'], description='800-800'
-)
-
-db.create_task(
-    task_name='mnist', script='task_script.py', hyper_parameters=dict(n_units1=600, n_units2=600),
-    saved_result_keys=['test_accuracy'], description='600-600'
-)
-
-db.create_task(
-    task_name='mnist', script='task_script.py', hyper_parameters=dict(n_units1=400, n_units2=400),
-    saved_result_keys=['test_accuracy'], description='400-400'
-)
-
-# wait for tasks to finish
-while db.check_unfinished_task(task_name='mnist'):
-    print("waiting for runners to finish the tasks")
-    time.sleep(1)
-
-# get the best model
-print("all tasks finished")
-net = db.find_top_model(model_name='mlp', sort=[("test_accuracy", -1)])
-print("the best accuracy {} is from model {}".format(net._test_accuracy, net._name))
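For illustration (not part of the deleted file): the three create_task calls above differ only in layer width, so an equivalent loop, a stylistic variant with identical behavior using the same `db` handle, would be:

    for n in (800, 600, 400):
        db.create_task(
            task_name='mnist', script='task_script.py',
            hyper_parameters=dict(n_units1=n, n_units2=n),
            saved_result_keys=['test_accuracy'], description='{}-{}'.format(n, n)
        )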
diff --git a/examples/database/run_tasks.py b/examples/database/run_tasks.py
deleted file mode 100644
index 446c2508f..000000000
--- a/examples/database/run_tasks.py
+++ /dev/null
@@ -1,19 +0,0 @@
-"""
-Run this script on servers; it will monitor the database and run tasks when
-the task distributor pushes a task to the database.
-
-"""
-import time
-
-import tensorlayer as tl
-
-# tl.logging.set_verbosity(tl.logging.DEBUG)
-
-# connect to database
-db = tl.db.TensorHub(ip='localhost', port=27017, dbname='temp', project_name='tutorial')
-
-# monitor the database and pull tasks to run
-while True:
-    print("waiting for a task from the distributor")
-    db.run_top_task(task_name='mnist', sort=[("time", -1)])
-    time.sleep(1)
diff --git a/examples/database/task_script.py b/examples/database/task_script.py
deleted file mode 100644
index 3f2f93ccd..000000000
--- a/examples/database/task_script.py
+++ /dev/null
@@ -1,71 +0,0 @@
-"""Sample task script."""
-
-import tensorflow as tf
-
-import tensorlayer as tl
-
-# tf.logging.set_verbosity(tf.logging.DEBUG)
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-# connect to database
-db = tl.db.TensorHub(ip='localhost', port=27017, dbname='temp', project_name='tutorial')
-
-# load dataset from database
-X_train, y_train, X_val, y_val, X_test, y_test = db.find_top_dataset('mnist')
-
-
-# define the network
-def mlp():
-    ni = tl.layers.Input([None, 784], name='input')
-    net = tl.layers.Dropout(keep=0.8, name='drop1')(ni)
-    net = tl.layers.Dense(n_units=n_units1, act=tf.nn.relu, name='relu1')(net)
-    net = tl.layers.Dropout(keep=0.5, name='drop2')(net)
-    net = tl.layers.Dense(n_units=n_units2, act=tf.nn.relu, name='relu2')(net)
-    net = tl.layers.Dropout(keep=0.5, name='drop3')(net)
-    net = tl.layers.Dense(n_units=10, act=None, name='output')(net)
-    M = tl.models.Model(inputs=ni, outputs=net)
-    return M
-
-
-network = mlp()
-
-# cost and accuracy
-cost = tl.cost.cross_entropy
-
-
-def acc(y, y_):
-    correct_prediction = tf.equal(tf.argmax(y, 1), tf.convert_to_tensor(y_, tf.int64))
-    return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
-
-
-# define the optimizer
-train_op = tf.optimizers.Adam(learning_rate=0.0001)
-
-# train the network
-# tl.utils.fit(
-#     network, train_op, cost, X_train, y_train, acc=acc, batch_size=500, n_epoch=20, print_freq=5,
-#     X_val=X_val, y_val=y_val, eval_train=False
-# )
-
-tl.utils.fit(
-    network,
-    train_op=tf.optimizers.Adam(learning_rate=0.0001),
-    cost=tl.cost.cross_entropy,
-    X_train=X_train,
-    y_train=y_train,
-    acc=acc,
-    batch_size=256,
-    n_epoch=20,
-    X_val=X_val,
-    y_val=y_val,
-    eval_train=False,
-)
-
-# evaluate and save the results that match the saved_result_keys
-test_accuracy = tl.utils.test(network, acc, X_test, y_test, batch_size=None, cost=cost)
-test_accuracy = float(test_accuracy)
-
-# save model into database
-db.save_model(network, model_name='mlp', name=str(n_units1) + '-' + str(n_units2), test_accuracy=test_accuracy)
-# in another script, you can load the model as follows:
-# net = db.find_model(sess=sess, model_name=str(n_units1) + '-' + str(n_units2))
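Note that n_units1 and n_units2 are never defined inside task_script.py; the runner is expected to inject the task's hyper_parameters into the script's namespace before executing it (an assumption about tl.db's internals, consistent with how dispatch_tasks.py names them). For illustration, hypothetical standalone defaults could be added near the top of the script:

    # hypothetical fallback values; db.run_top_task is assumed to override these
    # by injecting the task's hyper_parameters before the script body runs
    n_units1 = globals().get('n_units1', 800)
    n_units2 = globals().get('n_units2', 800)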
diff --git a/examples/deprecated_tutorials/tutorial_image_preprocess.py b/examples/deprecated_tutorials/tutorial_image_preprocess.py
deleted file mode 100755
index 7b4167ea7..000000000
--- a/examples/deprecated_tutorials/tutorial_image_preprocess.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""Data augmentation with numpy, scipy, threading and queue.
-
-Note that TensorFlow's TFRecord and Dataset APIs are faster.
-
-"""
-
-import time
-
-import tensorlayer as tl
-
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
-
-
-def distort_img(x):
-    x = tl.prepro.flip_axis(x, axis=1, is_random=True)
-    x = tl.prepro.crop(x, wrg=28, hrg=28, is_random=True)
-    return x
-
-
-s = time.time()
-results = tl.prepro.threading_data(X_train[0:100], distort_img)
-print("took %.3fs" % (time.time() - s))
-print(results.shape)
-
-tl.vis.save_images(X_train[0:10], [1, 10], '_original.png')
-tl.vis.save_images(results[0:10], [1, 10], '_distorted.png')
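The docstring above claims the Dataset API is faster but the tutorial never shows it. A minimal sketch of the equivalent random flip plus 28x28 random crop with tf.data, assuming TensorFlow 2.x is available (an illustration, not part of the original tutorial):

    # Minimal tf.data sketch of the same augmentation (illustrative only;
    # assumes TensorFlow 2.x and the CIFAR-10 arrays loaded as above).
    import tensorflow as tf
    import tensorlayer as tl

    X_train, _, _, _ = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)

    def distort(img):
        img = tf.image.random_flip_left_right(img)    # mirrors flip_axis(..., axis=1)
        img = tf.image.random_crop(img, [28, 28, 3])  # mirrors crop(wrg=28, hrg=28)
        return img

    ds = tf.data.Dataset.from_tensor_slices(X_train[0:100])
    ds = ds.map(distort, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(32)
    for batch in ds:
        pass  # consume batches, e.g. feed them to a training step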
diff --git a/examples/deprecated_tutorials/tutorial_imagenet_inceptionV3_distributed.py b/examples/deprecated_tutorials/tutorial_imagenet_inceptionV3_distributed.py
deleted file mode 100644
index 6c208f354..000000000
--- a/examples/deprecated_tutorials/tutorial_imagenet_inceptionV3_distributed.py
+++ /dev/null
@@ -1,452 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""Example of training an Inception V3 model on ImageNet.
-
-The parameters are set as in the best results of the paper: https://arxiv.org/abs/1512.00567
-
-The dataset can be downloaded from http://www.image-net.org/ or from the Kaggle competition:
-https://www.kaggle.com/c/imagenet-object-localization-challenge/data
-
-"""
-
-import argparse
-import logging
-import multiprocessing
-import os
-import random
-import sys
-import time
-from xml.etree import ElementTree
-
-import numpy as np
-import tensorflow as tf
-from tensorflow.contrib import slim
-from tensorflow.contrib.slim.python.slim.nets.inception_v3 import (inception_v3, inception_v3_arg_scope)
-from tensorflow.python.framework.errors_impl import OutOfRangeError
-from tensorflow.python.training import session_run_hook
-from tensorflow.python.training.basic_session_run_hooks import StopAtStepHook
-from tensorflow.python.training.monitored_session import SingularMonitoredSession
-
-import tensorlayer as tl
-
-tf.logging.set_verbosity(tf.logging.DEBUG)
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-########## VARIABLES ##########
-
-# get the dataset: https://www.kaggle.com/c/imagenet-object-localization-challenge/data
-# get the synset dictionary: http://www.image-net.org/archive/words.txt
-
-BASE_DIR = './'
-ILSVRC_DIR = os.path.join(BASE_DIR, 'ILSVRC')
-SYNSET_DICT = os.path.join(BASE_DIR, 'words.txt')
-TRAIN_FILE = os.path.join(BASE_DIR, 'train.csv')
-VAL_FILE = os.path.join(BASE_DIR, 'val.csv')
-CLASSES_FILE = os.path.join(BASE_DIR, 'classes.csv')
-CLASSES_VAL_FILE = os.path.join(BASE_DIR, 'classes_val.csv')
-CHECKPOINTS_PATH = './checkpoints'
-
-########## DATASETS ##########
-
-
-def get_data_sample(annotation_file, annotations_dir, data_dir):
-    labels = []
-    image_file = annotation_file.replace(annotations_dir, data_dir).replace('.xml', '.JPEG')
-    if tf.gfile.Exists(annotation_file) and tf.gfile.Exists(image_file):
-        xmltree = ElementTree.parse(annotation_file)
-        objects = xmltree.findall("object")
-        for object_iter in objects:
-            labels.append(object_iter.find("name").text)
-    else:
-        image_file = None
-    return image_file, labels
-
-
-def might_create_dataset(prefix, file, shuffle=False, suffix='**/*.xml'):
-    # load the data
-    data = []
-    labels = set()
-    annotations_dir = os.path.join(ILSVRC_DIR, 'Annotations', 'CLS-LOC', prefix)
-    data_dir = os.path.join(ILSVRC_DIR, 'Data', 'CLS-LOC', prefix)
-    for filename in tf.gfile.Glob(os.path.join(annotations_dir, suffix)):
-        image_path, image_labels = get_data_sample(filename, annotations_dir, data_dir)
-        if image_path is not None and len(image_labels) > 0:
-            data.append([image_path] + image_labels)
-            for label in image_labels:
-                labels.add(label)
-    if shuffle:
-        random.shuffle(data)
-    # write the data
-    with tf.gfile.Open(file, 'w') as f:
-        for d in data:
-            f.write('{}\n'.format(','.join(d)))
-    return sorted(labels)
-
-
-def might_create_training_set():
-    if not tf.gfile.Exists(TRAIN_FILE):
-        labels = might_create_dataset('train', TRAIN_FILE, shuffle=True)
-        with tf.gfile.Open(CLASSES_FILE, 'w') as f:
-            for l in labels:
-                f.write('{}\n'.format(l))
-
-
-def might_create_validation_set():
-    if not tf.gfile.Exists(VAL_FILE):
-        labels = might_create_dataset('val', VAL_FILE, suffix='*.xml')
-        with tf.gfile.Open(CLASSES_VAL_FILE, 'w') as f:
-            for l in labels:
-                f.write('{}\n'.format(l))
-
-
-def load_data(file, task_spec=None, batch_size=16, epochs=1, shuffle_size=0):
-    # load the classes dict
-    with tf.gfile.Open(CLASSES_FILE) as f:
-        labels = dict()
-        for i, line in enumerate(f.readlines()):
-            label = line.strip()
-            labels[label] = i
-    num_classes = len(labels)
-    # count the examples in the file
-    with tf.gfile.Open(file) as f:
-        size = len(f.readlines())
-
-    image_size = inception_v3.default_image_size
-    dataset = tf.data.TextLineDataset([file])
-    dataset = dataset.repeat(epochs)
-    # split the dataset into shards
-    if task_spec is not None and task_spec.num_workers > 1 and not task_spec.is_evaluator():
-        dataset = dataset.shard(num_shards=task_spec.num_workers, index=task_spec.shard_index)
-    if shuffle_size > 0:
-        dataset = dataset.shuffle(buffer_size=shuffle_size)
-
-    def _parse_example_fn(line):
-        line_split = line.decode().split(',')
-        filename = line_split[0]
-        labels_names = line_split[1:]
-        # labels
-        one_hot_labels = np.zeros(num_classes, dtype=np.float32)
-        for l in labels_names:
-            one_hot_labels[labels[l]] = 1.0
-        # image
-        image_bytes = tf.gfile.FastGFile(filename, 'rb').read()
-        return image_bytes, one_hot_labels
-
-    def _map_fn(example_serialized):
-        image_bytes, one_hot_labels = tf.py_func(
-            _parse_example_fn, [example_serialized], [tf.string, tf.float32], stateful=False
-        )
-
-        image = tf.image.decode_jpeg(image_bytes, channels=3)
-        image = tf.image.resize_images(image, size=[image_size, image_size])
-        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
-        one_hot_labels = tf.reshape(one_hot_labels, [num_classes])
-        return image, one_hot_labels
-
-    max_cpus = multiprocessing.cpu_count()
-    dataset = dataset.map(_map_fn, num_parallel_calls=max_cpus)
-    dataset = dataset.prefetch(batch_size * max_cpus + 100)
-    dataset = dataset.batch(batch_size)
-    images, one_hot_classes = dataset.make_one_shot_iterator().get_next()
-
-    images = tf.reshape(images, [batch_size, image_size, image_size, 3])
-    one_hot_classes = tf.reshape(one_hot_classes, [batch_size, num_classes])
-
-    return images, one_hot_classes, num_classes, size
-
-
-########## NETWORK ##########
-
-
-def build_network(image_input, num_classes=1001, is_training=False):
-    net_in = tl.layers.InputLayer(image_input, name='input_layer')
-    with slim.arg_scope(inception_v3_arg_scope()):
-        network = tl.layers.SlimNetsLayer(
-            prev_layer=net_in, slim_layer=inception_v3, slim_args={
-                'num_classes': num_classes,
-                'is_training': is_training
-            }, name='InceptionV3'
-        )
-
-    predictions = tf.nn.sigmoid(network.outputs, name='Predictions')
-    return network, predictions
-
-
-########## EVALUATOR ##########
-
-
-class EvaluatorStops(Exception):
-
-    def __init__(self, message):
-        super(EvaluatorStops, self).__init__(message)
-
-
-class EvaluatorHook(session_run_hook.SessionRunHook):
-
-    def __init__(self, checkpoints_path, saver):
-        self.checkpoints_path = checkpoints_path
-        self.summary_writer = tf.summary.FileWriter(os.path.join(checkpoints_path, 'validation'))
-        self.latest_checkpoint = None
-        self.saver = saver
-        self.summary = None
-
-    def after_create_session(self, session, coord):
-        checkpoint = tf.train.latest_checkpoint(self.checkpoints_path)
-        # wait until a new checkpoint is available
-        total_waited_secs = 0
-        while self.latest_checkpoint == checkpoint:
-            time.sleep(30)  # sleep 30 seconds waiting for a new checkpoint
-            checkpoint = tf.train.latest_checkpoint(self.checkpoints_path)
-            total_waited_secs += 30
-            if total_waited_secs > 30 * 60 * 60:
-                raise EvaluatorStops('Waited more than 30 hours to load a new checkpoint')
-
-        # restore the checkpoint
-        self.saver.restore(session, checkpoint)
-        self.latest_checkpoint = checkpoint
-        self.eval_step = int(self.latest_checkpoint.split('-')[-1])
-
-    def end(self, session):
-        super(EvaluatorHook, self).end(session)
-        # save the summaries
-        self.summary_writer.add_summary(self.summary, self.eval_step)
-
-
-########## METRICS ##########
-
-
-def calculate_metrics(predicted_batch, real_batch, threshold=0.5, is_training=False, ema_decay=0.9):
-    with tf.variable_scope('metric'):
-        threshold_graph = tf.constant(threshold, name='threshold')
-        zero_point_five = tf.constant(0.5)
-        predicted_bool = tf.greater_equal(predicted_batch, threshold_graph)
-        real_bool = tf.greater_equal(real_batch, zero_point_five)
-        predicted_bool_neg = tf.logical_not(predicted_bool)
-        real_bool_neg = tf.logical_not(real_bool)
-        differences_bool = tf.logical_xor(predicted_bool, real_bool)
-        tp = tf.logical_and(predicted_bool, real_bool)
-        tn = tf.logical_and(predicted_bool_neg, real_bool_neg)
-        fn = tf.logical_and(differences_bool, real_bool)
-        fp = tf.logical_and(differences_bool, predicted_bool)
-        tp = tf.reduce_sum(tf.cast(tp, tf.float32))
-        tn = tf.reduce_sum(tf.cast(tn, tf.float32))
-        fn = tf.reduce_sum(tf.cast(fn, tf.float32))
-        fp = tf.reduce_sum(tf.cast(fp, tf.float32))
-
-        average_ops = None
-        init_op = None
-        if is_training:
-            ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
-            average_ops = ema.apply([tp, tn, fp, fn])
-            tp = ema.average(tp)
-            tn = ema.average(tn)
-            fp = ema.average(fp)
-            fn = ema.average(fn)
-        else:
-            tp_v = tf.Variable(0, dtype=tf.float32, name='true_positive', trainable=False)
-            tn_v = tf.Variable(0, dtype=tf.float32, name='true_negative', trainable=False)
-            fp_v = tf.Variable(0, dtype=tf.float32, name='false_positive', trainable=False)
-            fn_v = tf.Variable(0, dtype=tf.float32, name='false_negative', trainable=False)
-            init_op = [tf.assign(tp_v, 0), tf.assign(tn_v, 0), tf.assign(fp_v, 0), tf.assign(fn_v, 0)]
-            tp = tf.assign_add(tp_v, tp)
-            tn = tf.assign_add(tn_v, tn)
-            fp = tf.assign_add(fp_v, fp)
-            fn = tf.assign_add(fn_v, fn)
-
-        # calculate the metrics
-        precision = tp / (tp + fp)
-        recall = tp / (tp + fn)
-        accuracy = (tp + tn) / (tp + tn + fp + fn)
-        fall_out = fp / (tn + fp)
-        f1_score = tp * 2 / (tp * 2 + fp + fn)
-
-        # replace NaNs (division by zero) with 0
-        zero = tf.constant(0, dtype=tf.float32)
-        precision = tf.cond(tf.equal(tp, 0.0), lambda: zero, lambda: precision)
-        recall = tf.cond(tf.equal(tp, 0.0), lambda: zero, lambda: recall)
-        accuracy = tf.cond(tf.equal(tp + tn, 0.0), lambda: zero, lambda: accuracy)
-        fall_out = tf.cond(tf.equal(fp, 0.0), lambda: zero, lambda: fall_out)
-        f1_score = tf.cond(tf.equal(tp, 0.0), lambda: zero, lambda: f1_score)
-
-        # add the metrics to tensorboard
-        # tf.summary.scalar('accuracy', accuracy)
-        tf.summary.scalar('precision', precision)
-        tf.summary.scalar('recall', recall)
-        tf.summary.scalar('fall-out', fall_out)
-        tf.summary.scalar('f1-score', f1_score)
-        tf.summary.scalar('true_positive', tp)
-        tf.summary.scalar('true_negative', tn)
-        tf.summary.scalar('false_positive', fp)
-        tf.summary.scalar('false_negative', fn)
-
-        metrics_ops = {
-            # 'accuracy': accuracy,
-            'precision': precision,
-            'recall': recall,
-            'fall-out': fall_out,
-            'f1-score': f1_score,
-            'true positive': tp,
-            'true negative': tn,
-            'false positive': fp,
-            'false negative': fn,
-        }
-    return init_op, average_ops, metrics_ops
-
-
-def run_evaluator(task_spec, checkpoints_path, batch_size=32):
-    with tf.Graph().as_default():
-        # load the dataset
-        images_input, one_hot_classes, num_classes, _dataset_size = load_data(
-            file=VAL_FILE, task_spec=task_spec, batch_size=batch_size, epochs=1
-        )
-        _network, predictions = build_network(images_input, num_classes=num_classes, is_training=False)
-        saver = tf.train.Saver()
-        # metrics
-        metrics_init_ops, _, metrics_ops = calculate_metrics(
-            predicted_batch=predictions, real_batch=one_hot_classes, is_training=False
-        )
-        # tensorboard summary
-        summary_op = tf.summary.merge_all()
-        # session hook
-        evaluator_hook = EvaluatorHook(checkpoints_path=checkpoints_path, saver=saver)
-
-        try:
-            # infinite loop: evaluate each new checkpoint
-            while True:
-                with SingularMonitoredSession(hooks=[evaluator_hook]) as sess:
-                    sess.run(metrics_init_ops)
-                    try:
-                        while not sess.should_stop():
-                            metrics, summary = sess.run([metrics_ops, summary_op])
-                            evaluator_hook.summary = summary
-                    except OutOfRangeError:
-                        pass
-                    logging.info('step: {} {}'.format(evaluator_hook.eval_step, metrics))
-        except EvaluatorStops:
-            # the evaluator has waited too long for a new checkpoint
-            pass
-
-
-########## TRAINING ##########
-
-
-def run_worker(task_spec, checkpoints_path, batch_size=32, epochs=10):
-    device_fn = task_spec.device_fn() if task_spec is not None else None
-    # create the graph
-    with tf.Graph().as_default():
-        global_step = tf.train.get_or_create_global_step()
-        with tf.device(device_fn):
-            # load the dataset
-            images_input, one_hot_classes, num_classes, dataset_size = load_data(
-                file=TRAIN_FILE, task_spec=task_spec, batch_size=batch_size, epochs=epochs, shuffle_size=10000
-            )
-            # network
-            network, predictions = build_network(images_input, num_classes=num_classes, is_training=True)
-            # training operations
-            loss = tl.cost.sigmoid_cross_entropy(output=network.outputs, target=one_hot_classes, name='loss')
-            steps_per_epoch = dataset_size // batch_size
-            learning_rate = tf.train.exponential_decay(
-                learning_rate=0.045,
-                global_step=global_step,
-                decay_steps=steps_per_epoch * 2,  # decay every 2 epochs
-                decay_rate=0.94,
-                staircase=True,
-                name='learning_rate'
-            )
-            optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate, decay=0.9, epsilon=1.0)
-            # clip and apply the gradients
-            gvs = optimizer.compute_gradients(loss=loss, var_list=network.all_params)
-            capped_gvs = []
-            for grad, var in gvs:
-                if grad is not None:
-                    grad = tf.clip_by_value(grad, -2., 2.)
-                    capped_gvs.append((grad, var))
-            train_op = optimizer.apply_gradients(grads_and_vars=capped_gvs, global_step=global_step)
-            # metrics
-            tf.summary.scalar('learning_rate/value', learning_rate)
-            tf.summary.scalar('loss/logits', loss)
-            _, metrics_average_ops, metrics_ops = calculate_metrics(
-                predicted_batch=predictions, real_batch=one_hot_classes, is_training=True
-            )
-            with tf.control_dependencies([train_op]):
-                train_op = tf.group(metrics_average_ops)
-
-        # start training
-        hooks = [StopAtStepHook(last_step=steps_per_epoch * epochs)]
-        with tl.distributed.DistributedSession(task_spec=task_spec, hooks=hooks, checkpoint_dir=checkpoints_path,
-                                               save_summaries_secs=None, save_summaries_steps=300,
-                                               save_checkpoint_secs=60 * 60) as sess:
-            # print the network information
-            if task_spec is None or task_spec.is_master():
-                network.print_params(False, session=sess)
-                network.print_layers()
-                sys.stdout.flush()
-            # run the training loop
-            try:
-                last_log_time = time.time()
-                next_log_time = last_log_time + 60
-                while not sess.should_stop():
-                    step, loss_val, learning_rate_val, _, metrics = sess.run(
-                        [global_step, loss, learning_rate, train_op, metrics_ops]
-                    )
-                    if task_spec is None or task_spec.is_master():
-                        now = time.time()
-                        if now > next_log_time:
-                            last_log_time = now
-                            next_log_time = last_log_time + 60
-                            current_epoch = '{:.3f}'.format(float(step) / steps_per_epoch)
-                            max_steps = epochs * steps_per_epoch
-                            m = 'Epoch: {}/{} Steps: {}/{} Loss: {} Learning rate: {} Metrics: {}'
-                            logging.info(
-                                m.format(current_epoch, epochs, step, max_steps, loss_val, learning_rate_val, metrics)
-                            )
-            except OutOfRangeError:
-                pass
-
-
-########## MAIN ##########
-
-if __name__ == '__main__':
-    # configure logging output
-    logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
-
-    if not tf.gfile.Exists(ILSVRC_DIR):
-        raise FileNotFoundError(
-            'We cannot find the directory "{}"\n'
-            'You need to modify the variable BASE_DIR with the path where the dataset is.\n'
-            'The dataset can be downloaded from http://www.image-net.org/ or from the Kaggle competition:\n'
-            'https://www.kaggle.com/c/imagenet-object-localization-challenge/data'.format(ILSVRC_DIR)
-        )
-
-    # args
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--with_evaluator', dest='with_evaluator', action='store_true')
-    parser.add_argument('--batch_size', dest='batch_size', type=int, default=32)
-    parser.add_argument('--epochs', dest='epochs', type=int, default=100)
-    parser.set_defaults(with_evaluator=False)
-    args = parser.parse_args()
-    logging.info('Batch size: {}'.format(args.batch_size))
-    logging.info('Epochs: {}'.format(args.epochs))
-
-    # check the dataset files and create them if necessary
-    might_create_training_set()
-    might_create_validation_set()
-
-    # load the environment for distributed training, using the last worker as evaluator
-    task_spec = tl.distributed.TaskSpec()
-
-    if task_spec is None:
-        logging.info('Run in single node')
-        run_worker(task_spec, CHECKPOINTS_PATH, batch_size=args.batch_size, epochs=args.epochs)
-    else:
-        if args.with_evaluator:
-            # run with an evaluator
-            logging.info('Last worker is the evaluator')
-            task_spec = task_spec.use_last_worker_as_evaluator()
-
-        if task_spec.is_evaluator():
-            run_evaluator(task_spec, CHECKPOINTS_PATH, batch_size=args.batch_size)
-        else:
-            task_spec.create_server()
-            run_worker(task_spec, CHECKPOINTS_PATH, batch_size=args.batch_size, epochs=args.epochs)
diff --git a/examples/deprecated_tutorials/tutorial_mnist_distributed.py b/examples/deprecated_tutorials/tutorial_mnist_distributed.py
deleted file mode 100644
index 29d291ba4..000000000
--- a/examples/deprecated_tutorials/tutorial_mnist_distributed.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""Alpha version of distributed training.
-
-You can test this example on your local machine with 2 workers and 1 ps as below,
-where CUDA_VISIBLE_DEVICES can be used to set the GPUs each process may use.
-
-CUDA_VISIBLE_DEVICES= TF_CONFIG='{"cluster": {"ps": ["127.0.0.1:3001"], "worker": ["127.0.0.1:3002", "127.0.0.1:3003"]}, "task": {"type": "worker", "index": 0}}' python example/tutorial_mnist_distributed.py > output-master 2>&1 &
-CUDA_VISIBLE_DEVICES= TF_CONFIG='{"cluster": {"ps": ["127.0.0.1:3001"], "worker": ["127.0.0.1:3002", "127.0.0.1:3003"]}, "task": {"type": "worker", "index": 1}}' python example/tutorial_mnist_distributed.py > output-worker 2>&1 &
-CUDA_VISIBLE_DEVICES= TF_CONFIG='{"cluster": {"ps": ["127.0.0.1:3001"], "worker": ["127.0.0.1:3002", "127.0.0.1:3003"]}, "task": {"type": "ps", "index": 0}}' python example/tutorial_mnist_distributed.py > output-ps 2>&1 &
-Note: for GPU training, set CUDA_VISIBLE_DEVICES=GPU_ID
-
-"""
-
-import tensorflow as tf
-
-import tensorlayer as tl
-
-tf.logging.set_verbosity(tf.logging.DEBUG)
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-# load the environment for distributed training
-task_spec = tl.distributed.TaskSpec()
-task_spec.create_server()
-device_fn = task_spec.device_fn() if task_spec is not None else None
-
-# prepare data
-X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
-
-# create the graph
-with tf.device(device_fn):
-    # define the placeholders
-    x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
-    y_ = tf.placeholder(tf.int64, shape=[None], name='y_')
-
-    # define the network
-    network = tl.layers.InputLayer(x, name='input')
-    network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1')
-    network = tl.layers.DenseLayer(network, 800, tf.nn.relu, name='relu1')
-    network = tl.layers.DropoutLayer(network, keep=0.5, name='drop2')
-    network = tl.layers.DenseLayer(network, 800, tf.nn.relu, name='relu2')
-    network = tl.layers.DropoutLayer(network, keep=0.5, name='drop3')
-    # the softmax is implemented internally in tl.cost.cross_entropy(y, y_) to
-    # speed up computation, so we use identity here.
-    # see tf.nn.sparse_softmax_cross_entropy_with_logits()
-    network = tl.layers.DenseLayer(network, n_units=10, act=None, name='output')
-
-    # define the cost function and metrics
-    y = network.outputs
-    cost = tl.cost.cross_entropy(y, y_, name='cost')
-    correct_prediction = tf.equal(tf.argmax(y, 1), y_)
-    acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
-    y_op = tf.argmax(tf.nn.softmax(y), 1)
-
-    # define the optimizer
-    train_params = network.all_params
-    train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost, var_list=train_params)
-
-    with tl.distributed.DistributedSession(task_spec=task_spec) as sess:
-        # print the network information
-        if task_spec.is_master():
-            network.print_params(session=sess)
-            network.print_layers()
-            print_freq = 5
-            eval_train = False
-        else:
-            print_freq = 1000
-            eval_train = False
-
-        # we do not need to initialize the variables; the session does it for us
-        # tl.layers.initialize_global_variables(sess)
-
-        # train the network
-        tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_, \
-                     acc=acc, batch_size=500, n_epoch=500, print_freq=print_freq, \
-                     X_val=X_val, y_val=y_val, eval_train=eval_train)
-
-        if task_spec.is_master():
-            # evaluation
-            tl.utils.test(sess, network, acc, X_test, y_test, x, y_, batch_size=None, cost=cost)
-
-            # save the network to a .npz file
-            tl.files.save_npz(network.all_params, name='model.npz')
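For readers unfamiliar with the TF_CONFIG convention used in the docstring above: it is a JSON document passed through an environment variable, which a TaskSpec-style helper parses to learn its role in the cluster. A minimal sketch of that parsing, for illustration only (this is not TensorLayer's implementation):

    # Illustrative only: read the TF_CONFIG variable shown in the docstring above.
    import json
    import os

    tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
    cluster = tf_config.get('cluster', {})  # e.g. {'ps': [...], 'worker': [...]}
    task = tf_config.get('task', {})        # e.g. {'type': 'worker', 'index': 0}
    print('job type:', task.get('type'), '- task index:', task.get('index'))
    print('ps hosts:', cluster.get('ps', []), '- worker hosts:', cluster.get('worker', []))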
diff --git a/examples/deprecated_tutorials/tutorial_mnist_distributed.yml b/examples/deprecated_tutorials/tutorial_mnist_distributed.yml
deleted file mode 100644
index 1b7ff944e..000000000
--- a/examples/deprecated_tutorials/tutorial_mnist_distributed.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-# https://docs.docker.com/compose/compose-file/
-#
-# reference: https://docs.microsoft.com/en-us/azure/container-service/dcos-swarm/container-service-swarm-walkthrough
-# 1. create a swarm cluster on azure:
-# $ az group create -l southeastasia -n tensorlayer-swarm -o table --debug
-# $ az acs create -n tl-swarm-cluster --orchestrator-type Swarm -g tensorlayer-swarm --agent-count 3 -o table --debug
-#
-# 2. create an ssh tunnel to the swarm master:
-# $ master=$(az acs show -n tl-swarm-cluster -g tensorlayer-swarm --query 'masterProfile.fqdn' | jq -r .)
-# $ ssh -p 2200 -fNL 2375:localhost:2375 azureuser@$master
-# $ export DOCKER_HOST=:2375
-#
-# 3. start
-# $ docker-compose -f tutorial_mnist_distributed.yml up

----
-version: '3'
-services:
-  master:
-    image: tensorlayer/tensorlayer:latest
-    entrypoint:
-      - python
-      - /tensorlayer/example/tutorial_mnist_distributed.py
-    environment:
-      CUDA_VISIBLE_DEVICES: ''
-      TF_CONFIG: |-
-        {
-          "cluster": {
-            "ps": [
-              "ps:3001"
-            ],
-            "worker": [
-              "master:3002",
-              "worker:3003"
-            ]
-          },
-          "task": {
-            "type": "worker",
-            "index": 0
-          }
-        }
-  worker:
-    image: tensorlayer/tensorlayer:latest
-    entrypoint:
-      - python
-      - /tensorlayer/example/tutorial_mnist_distributed.py
-    environment:
-      CUDA_VISIBLE_DEVICES: ''
-      TF_CONFIG: |-
-        {
-          "cluster": {
-            "ps": [
-              "ps:3001"
-            ],
-            "worker": [
-              "master:3002",
-              "worker:3003"
-            ]
-          },
-          "task": {
-            "type": "worker",
-            "index": 1
-          }
-        }
-  ps:
-    image: tensorlayer/tensorlayer:latest
-    entrypoint:
-      - python
-      - /tensorlayer/example/tutorial_mnist_distributed.py
-    environment:
-      CUDA_VISIBLE_DEVICES: ''
-      TF_CONFIG: |-
-        {
-          "cluster": {
-            "ps": [
-              "ps:3001"
-            ],
-            "worker": [
-              "master:3002",
-              "worker:3003"
-            ]
-          },
-          "task": {
-            "type": "ps",
-            "index": 0
-          }
-        }
diff --git a/examples/distributed_training/README.md b/examples/distributed_training/README.md
deleted file mode 100644
index e5ba08182..000000000
--- a/examples/distributed_training/README.md
+++ /dev/null
@@ -1 +0,0 @@
-Mai Luo:
\ No newline at end of file
diff --git a/examples/distributed_training/tutorial_cifar10_distributed_trainer.py b/examples/distributed_training/tutorial_cifar10_distributed_trainer.py
deleted file mode 100644
index 830bf879b..000000000
--- a/examples/distributed_training/tutorial_cifar10_distributed_trainer.py
+++ /dev/null
@@ -1,124 +0,0 @@
-#! /usr/bin/env python3
-# -*- coding: utf-8 -*-
-r"""
-1. Before you start, run this script: https://github.com/tensorlayer/tensorlayer/blob/distributed/scripts/download_and_install_openmpi3_linux.sh
-2. Update the PATH with the OpenMPI bin directory by running: PATH=$PATH:$HOME/local/openmpi/bin
-   Update the PATH in ~/.bashrc if you want OpenMPI to be ready when the machine starts.
-3. Then XXXXX Milo please add this part
-   mpirun -np 2 \
-   -bind-to none -map-by slot \
-   -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH \
-   -mca pml ob1 -mca btl ^openib \
-   python3 xxxxx.py
-"""
-
-import multiprocessing
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.layers import (BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, MaxPool2d)
-
-tf.logging.set_verbosity(tf.logging.DEBUG)
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-
-def make_dataset(images, labels, num_epochs=1, shuffle_data_seed=0):
-    img = tf.data.Dataset.from_tensor_slices(images)
-    lab = tf.data.Dataset.from_tensor_slices(np.array(labels, dtype=np.int64))
-    dataset = tf.data.Dataset.zip((img, lab))
-    dataset = dataset.repeat(num_epochs).shuffle(buffer_size=10000, seed=shuffle_data_seed)
-    return dataset
-
-
-def data_aug_train(img, ann):
-    # 1. Randomly crop a [height, width] section of the image.
-    img = tf.random_crop(img, [24, 24, 3])
-    # 2. Randomly flip the image horizontally.
-    img = tf.image.random_flip_left_right(img)
-    # 3. Randomly change brightness.
-    img = tf.image.random_brightness(img, max_delta=63)
-    # 4. Randomly change contrast.
-    img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
-    # 5. Subtract off the mean and divide by the variance of the pixels.
-    img = tf.image.per_image_standardization(img)
-    return img, ann
-
-
-def data_aug_valid(img, ann):
-    # 1. Crop the central [height, width] of the image.
-    img = tf.image.resize_image_with_crop_or_pad(img, 24, 24)
-    # 2. Subtract off the mean and divide by the variance of the pixels.
-    img = tf.image.per_image_standardization(img)
-    return img, ann
-
-
-def model(x, is_train):
-    with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
-        net = InputLayer(x, name='input')
-        net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', b_init=None, name='cnn1')
-        net = BatchNormLayer(net, decay=0.99, is_train=is_train, act=tf.nn.relu, name='batch1')
-        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')
-
-        net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', b_init=None, name='cnn2')
-        net = BatchNormLayer(net, decay=0.99, is_train=is_train, act=tf.nn.relu, name='batch2')
-        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
-
-        net = FlattenLayer(net, name='flatten')
-        net = DenseLayer(net, 384, act=tf.nn.relu, name='d1relu')
-        net = DenseLayer(net, 192, act=tf.nn.relu, name='d2relu')
-        net = DenseLayer(net, 10, act=None, name='output')
-    return net
-
-
-def build_train(x, y_):
-    net = model(x, is_train=True)
-    cost = tl.cost.cross_entropy(net.outputs, y_, name='cost_train')
-    L2 = 0
-    for p in tl.layers.get_variables_with_name('relu/W', True, True):
-        L2 += tf.contrib.layers.l2_regularizer(0.004)(p)
-    cost = cost + L2
-    accurate_prediction = tf.equal(tf.argmax(net.outputs, 1), y_)
-    accuracy = tf.reduce_mean(tf.cast(accurate_prediction, tf.float32), name='accuracy_train')
-    log_tensors = {'cost': cost, 'accuracy': accuracy}
-    return net, cost, log_tensors
-
-
-def build_validation(x, y_):
-    net = model(x, is_train=False)
-    cost = tl.cost.cross_entropy(net.outputs, y_, name='cost_test')
-    accurate_prediction = tf.equal(tf.argmax(net.outputs, 1), y_)
-    accuracy = tf.reduce_mean(tf.cast(accurate_prediction, tf.float32), name='accuracy_test')
-    return net, [cost, accuracy]
-
-
-if __name__ == '__main__':
-    # Load the CIFAR10 data
-    X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
-
-    # Setup the trainer
-    training_dataset = make_dataset(X_train, y_train)
-    training_dataset = training_dataset.map(data_aug_train, num_parallel_calls=multiprocessing.cpu_count())
-    # validation_dataset = make_dataset(X_test, y_test)
-    # validation_dataset = validation_dataset.map(data_aug_valid, num_parallel_calls=multiprocessing.cpu_count())
-    trainer = tl.distributed.Trainer(
-        build_training_func=build_train, training_dataset=training_dataset, optimizer=tf.train.AdamOptimizer,
-        optimizer_args={'learning_rate': 0.0001}, batch_size=128, prefetch_size=128
-        # validation_dataset=validation_dataset, build_validation_func=build_validation
-    )
-
-    # There are multiple ways to use the trainer:
-    # 1. The easiest way to train all data: trainer.train_to_end()
-    # 2. Train with validation in the middle: trainer.train_and_validate_to_end(validate_step_size=100)
-    # 3. Train with full control, as follows:
-    while not trainer.session.should_stop():
-        try:
-            # Run a training step synchronously.
-            trainer.train_on_batch()
-            # TODO: do whatever you like to the training session.
-        except tf.errors.OutOfRangeError:
-            # the dataset throws OutOfRangeError when it reaches the end
-            break
-
-    # TODO: Test the trained model
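Option 2 above is only sketched in comments. Assembling the commented-out arguments from this file, enabling validation would look roughly like the following (an illustration built from those comments, not a tested configuration):

    # Sketch of option 2 with validation enabled, built from the commented-out
    # arguments above (assumes the same make_dataset/data_aug_valid/build_validation).
    validation_dataset = make_dataset(X_test, y_test)
    validation_dataset = validation_dataset.map(data_aug_valid, num_parallel_calls=multiprocessing.cpu_count())
    trainer = tl.distributed.Trainer(
        build_training_func=build_train, training_dataset=training_dataset,
        optimizer=tf.train.AdamOptimizer, optimizer_args={'learning_rate': 0.0001},
        batch_size=128, prefetch_size=128,
        validation_dataset=validation_dataset, build_validation_func=build_validation
    )
    trainer.train_and_validate_to_end(validate_step_size=100)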
diff --git a/examples/distributed_training/tutorial_mnist_distributed_trainer.py b/examples/distributed_training/tutorial_mnist_distributed_trainer.py
deleted file mode 100755
index 0f1b8b6dd..000000000
--- a/examples/distributed_training/tutorial_mnist_distributed_trainer.py
+++ /dev/null
@@ -1,76 +0,0 @@
-#! /usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-
-tf.logging.set_verbosity(tf.logging.DEBUG)
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-
-def make_dataset(images, labels, num_epochs=1, shuffle_data_seed=0):
-    ds1 = tf.data.Dataset.from_tensor_slices(images)
-    ds2 = tf.data.Dataset.from_tensor_slices(np.array(labels, dtype=np.int64))
-    dataset = tf.data.Dataset.zip((ds1, ds2))
-    dataset = dataset.repeat(num_epochs).shuffle(buffer_size=10000, seed=shuffle_data_seed)
-    return dataset
-
-
-def model(x, is_train):
-    with tf.variable_scope('mlp', reuse=tf.AUTO_REUSE):
-        network = tl.layers.InputLayer(x, name='input')
-        network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1', is_fix=True, is_train=is_train)
-        network = tl.layers.DenseLayer(network, 800, tf.nn.relu, name='relu1')
-        network = tl.layers.DropoutLayer(network, keep=0.5, name='drop2', is_fix=True, is_train=is_train)
-        network = tl.layers.DenseLayer(network, 800, tf.nn.relu, name='relu2')
-        network = tl.layers.DropoutLayer(network, keep=0.5, name='drop3', is_fix=True, is_train=is_train)
-        network = tl.layers.DenseLayer(network, n_units=10, act=tf.identity, name='output')
-    return network
-
-
-def build_train(x, y_):
-    net = model(x, is_train=True)
-    cost = tl.cost.cross_entropy(net.outputs, y_, name='cost_train')
-    accurate_prediction = tf.equal(tf.argmax(net.outputs, 1), y_)
-    accuracy = tf.reduce_mean(tf.cast(accurate_prediction, tf.float32), name='accuracy_train')
-    log_tensors = {'cost': cost, 'accuracy': accuracy}
-    return net, cost, log_tensors
-
-
-def build_validation(x, y_):
-    net = model(x, is_train=False)
-    cost = tl.cost.cross_entropy(net.outputs, y_, name='cost_test')
-    accurate_prediction = tf.equal(tf.argmax(net.outputs, 1), y_)
-    accuracy = tf.reduce_mean(tf.cast(accurate_prediction, tf.float32), name='accuracy_test')
-    return net, [cost, accuracy]
-
-
-if __name__ == '__main__':
-    # Load the MNIST data
-    X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
-
-    # Setup the trainer
-    training_dataset = make_dataset(X_train, y_train)
-    # validation_dataset = make_dataset(X_val, y_val)
-    trainer = tl.distributed.Trainer(
-        build_training_func=build_train, training_dataset=training_dataset, optimizer=tf.train.AdamOptimizer,
-        optimizer_args={'learning_rate': 0.001}, batch_size=500, prefetch_size=500
-        # validation_dataset=validation_dataset, build_validation_func=build_validation
-    )
-
-    # There are multiple ways to use the trainer:
-    # 1. The easiest way to train all data: trainer.train_to_end()
-    # 2. Train with validation in the middle: trainer.train_and_validate_to_end(validate_step_size=100)
-    # 3. Train with full control, as follows:
-    while not trainer.session.should_stop():
-        try:
-            # Run a training step synchronously.
-            trainer.train_on_batch()
-            # TODO: do whatever you like to the training session.
-        except tf.errors.OutOfRangeError:
-            # the dataset throws OutOfRangeError when it reaches the end
-            break
-
-    # TODO: Test the trained model
diff --git a/examples/keras_tfslim/README.md b/examples/keras_tfslim/README.md
deleted file mode 100644
index a796f0c86..000000000
--- a/examples/keras_tfslim/README.md
+++ /dev/null
@@ -1 +0,0 @@
-### All other TensorFlow libraries can be connected to TensorLayer via LambdaLayer.
\ No newline at end of file
diff --git a/examples/keras_tfslim/tutorial_keras.py b/examples/keras_tfslim/tutorial_keras.py
deleted file mode 100644
index 9d0606c5f..000000000
--- a/examples/keras_tfslim/tutorial_keras.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.layers import Input, Lambda
-
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
-
-batch_size = 128
-
-# keras layers
-layers = [
-    tf.keras.layers.Dropout(0.8),
-    tf.keras.layers.Dense(800, activation='relu'),
-    tf.keras.layers.Dropout(0.5),
-    tf.keras.layers.Dense(800, activation='relu'),
-    tf.keras.layers.Dropout(0.5),
-    tf.keras.layers.Dense(10, activation='linear')
-]
-keras_block = tf.keras.Sequential(layers)
-# call the block once to build the keras model and create its trainable_variables
-_ = keras_block(np.random.random([batch_size, 784]).astype(np.float32))
-
-# build a tl model using the keras layers
-ni = Input([None, 784], dtype=tf.float32)
-nn = Lambda(fn=keras_block, fn_weights=keras_block.trainable_variables)(ni)
-network = tl.models.Model(inputs=ni, outputs=nn)
-print(network)
-
-n_epoch = 200
-learning_rate = 0.0001
-
-train_params = network.trainable_weights
-optimizer = tf.optimizers.Adam(learning_rate)
-
-for epoch in range(n_epoch):
-    start_time = time.time()
-    ## Training
-    for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
-        with tf.GradientTape() as tape:
-            _logits = network(X_train_a, is_train=True)
-            err = tl.cost.cross_entropy(_logits, y_train_a, name='train_loss')
-
-        grad = tape.gradient(err, train_params)
-        optimizer.apply_gradients(zip(grad, train_params))
-        # _, _ = sess.run([cost, train_op], feed_dict={x: X_train_a, y_: y_train_a, K.learning_phase(): 1})
-
-    print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
-
-    ## Evaluation
-    train_loss, train_acc, n_batch = 0, 0, 0
-    for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False):
-        _logits = network(X_train_a, is_train=False)
-        err = tl.cost.cross_entropy(_logits, y_train_a, name='train_loss')
-        ac = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(_logits, 1), y_train_a), tf.float32))
-        train_loss += err
-        train_acc += ac
-        n_batch += 1
-    print("   train loss: %f" % (train_loss / n_batch))
-    print("   train acc: %f" % (train_acc / n_batch))
-    val_loss, val_acc, n_batch = 0, 0, 0
-    for X_val_a, y_val_a in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False):
-        _logits = network(X_val_a, is_train=False)
-        err = tl.cost.cross_entropy(_logits, y_val_a, name='train_loss')
-        ac = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(_logits, 1), y_val_a), tf.float32))
-        val_loss += err
-        val_acc += ac
-        n_batch += 1
-    print("   val loss: %f" % (val_loss / n_batch))
-    print("   val acc: %f" % (val_acc / n_batch))
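The README above says any other TensorFlow library can be connected through LambdaLayer, and the tutorial wraps a whole tf.keras Sequential with weights. The same Lambda layer can also wrap a plain, parameter-free TF function; a minimal sketch under that assumption, reusing the Input/Lambda/Model API from the tutorial above:

    # Minimal sketch: wrap a parameter-free TF function with Lambda
    # (illustrative; mirrors the Input/Lambda/Model usage in the tutorial above).
    import tensorflow as tf
    import tensorlayer as tl
    from tensorlayer.layers import Input, Lambda

    ni = Input([None, 784], dtype=tf.float32)
    nn = Lambda(fn=lambda x: tf.nn.relu(x) * 0.5)(ni)  # any TF op can go here
    network = tl.models.Model(inputs=ni, outputs=nn)
    print(network)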
diff --git a/examples/pretrained_cnn/README.md b/examples/pretrained_cnn/README.md
deleted file mode 100644
index 8c4a96ee2..000000000
--- a/examples/pretrained_cnn/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-## Please read the docs on using [Pre-trained Models](https://tensorlayer.readthedocs.io/en/latest/user/get_start_advance.html#pre-trained-cnn)
-
diff --git a/examples/pretrained_cnn/data/__init__.py b/examples/pretrained_cnn/data/__init__.py
deleted file mode 100644
index 83d5401c3..000000000
--- a/examples/pretrained_cnn/data/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from __future__ import absolute_import
-
-from . import *
diff --git a/examples/pretrained_cnn/data/imagenet_class_index.json b/examples/pretrained_cnn/data/imagenet_class_index.json
deleted file mode 100644
index 5fe0dfefc..000000000
--- a/examples/pretrained_cnn/data/imagenet_class_index.json
+++ /dev/null
@@ -1 +0,0 @@
-{"0": ["n01440764", "tench"], "1": ["n01443537", "goldfish"], "2": ["n01484850", "great_white_shark"], "3": ["n01491361", "tiger_shark"], "4": ["n01494475", "hammerhead"], "5": ["n01496331", "electric_ray"], "6": ["n01498041", "stingray"], "7": ["n01514668", "cock"], "8": ["n01514859", "hen"], "9": ["n01518878", "ostrich"], "10": ["n01530575", "brambling"], "11": ["n01531178", "goldfinch"], "12": ["n01532829", "house_finch"], "13": ["n01534433", "junco"], "14": ["n01537544", "indigo_bunting"], "15": ["n01558993", "robin"], "16": ["n01560419", "bulbul"], "17": ["n01580077", "jay"], "18": ["n01582220", "magpie"], "19": ["n01592084", "chickadee"], "20": ["n01601694", "water_ouzel"], "21": ["n01608432", "kite"], "22": ["n01614925", "bald_eagle"], "23": ["n01616318", "vulture"], "24": ["n01622779", "great_grey_owl"], "25": ["n01629819", "European_fire_salamander"], "26": ["n01630670", "common_newt"], "27": ["n01631663", "eft"], "28": ["n01632458", "spotted_salamander"], "29": ["n01632777", "axolotl"], "30": ["n01641577", "bullfrog"], "31": ["n01644373", "tree_frog"], "32": ["n01644900", "tailed_frog"], "33": ["n01664065", "loggerhead"], "34": ["n01665541", "leatherback_turtle"], "35": ["n01667114", "mud_turtle"], "36": ["n01667778", "terrapin"], "37": ["n01669191", "box_turtle"], "38": ["n01675722", "banded_gecko"], "39": ["n01677366", "common_iguana"], "40": ["n01682714", "American_chameleon"], "41": ["n01685808", "whiptail"], "42": ["n01687978", "agama"], "43": ["n01688243", "frilled_lizard"], "44": ["n01689811", "alligator_lizard"], "45": ["n01692333", "Gila_monster"], "46": ["n01693334", "green_lizard"], "47": ["n01694178", "African_chameleon"], "48": ["n01695060", "Komodo_dragon"], "49": ["n01697457", "African_crocodile"], "50": ["n01698640", "American_alligator"], "51": ["n01704323", "triceratops"], "52": ["n01728572", "thunder_snake"], "53": ["n01728920", "ringneck_snake"], "54": ["n01729322", "hognose_snake"], "55": ["n01729977", "green_snake"], "56": ["n01734418", "king_snake"], "57": ["n01735189", "garter_snake"], "58": ["n01737021", "water_snake"], "59": ["n01739381", "vine_snake"], "60": ["n01740131", "night_snake"], "61": ["n01742172", "boa_constrictor"], "62": ["n01744401", "rock_python"], "63": ["n01748264", "Indian_cobra"], "64": ["n01749939", "green_mamba"], "65": ["n01751748", "sea_snake"], "66": ["n01753488", "horned_viper"], "67": ["n01755581", "diamondback"], "68": ["n01756291", "sidewinder"], "69": ["n01768244", "trilobite"], "70": ["n01770081", "harvestman"], "71": ["n01770393", "scorpion"], "72": ["n01773157", "black_and_gold_garden_spider"], "73": ["n01773549", "barn_spider"], "74": ["n01773797", "garden_spider"], "75": ["n01774384", "black_widow"], "76": ["n01774750",
"tarantula"], "77": ["n01775062", "wolf_spider"], "78": ["n01776313", "tick"], "79": ["n01784675", "centipede"], "80": ["n01795545", "black_grouse"], "81": ["n01796340", "ptarmigan"], "82": ["n01797886", "ruffed_grouse"], "83": ["n01798484", "prairie_chicken"], "84": ["n01806143", "peacock"], "85": ["n01806567", "quail"], "86": ["n01807496", "partridge"], "87": ["n01817953", "African_grey"], "88": ["n01818515", "macaw"], "89": ["n01819313", "sulphur-crested_cockatoo"], "90": ["n01820546", "lorikeet"], "91": ["n01824575", "coucal"], "92": ["n01828970", "bee_eater"], "93": ["n01829413", "hornbill"], "94": ["n01833805", "hummingbird"], "95": ["n01843065", "jacamar"], "96": ["n01843383", "toucan"], "97": ["n01847000", "drake"], "98": ["n01855032", "red-breasted_merganser"], "99": ["n01855672", "goose"], "100": ["n01860187", "black_swan"], "101": ["n01871265", "tusker"], "102": ["n01872401", "echidna"], "103": ["n01873310", "platypus"], "104": ["n01877812", "wallaby"], "105": ["n01882714", "koala"], "106": ["n01883070", "wombat"], "107": ["n01910747", "jellyfish"], "108": ["n01914609", "sea_anemone"], "109": ["n01917289", "brain_coral"], "110": ["n01924916", "flatworm"], "111": ["n01930112", "nematode"], "112": ["n01943899", "conch"], "113": ["n01944390", "snail"], "114": ["n01945685", "slug"], "115": ["n01950731", "sea_slug"], "116": ["n01955084", "chiton"], "117": ["n01968897", "chambered_nautilus"], "118": ["n01978287", "Dungeness_crab"], "119": ["n01978455", "rock_crab"], "120": ["n01980166", "fiddler_crab"], "121": ["n01981276", "king_crab"], "122": ["n01983481", "American_lobster"], "123": ["n01984695", "spiny_lobster"], "124": ["n01985128", "crayfish"], "125": ["n01986214", "hermit_crab"], "126": ["n01990800", "isopod"], "127": ["n02002556", "white_stork"], "128": ["n02002724", "black_stork"], "129": ["n02006656", "spoonbill"], "130": ["n02007558", "flamingo"], "131": ["n02009229", "little_blue_heron"], "132": ["n02009912", "American_egret"], "133": ["n02011460", "bittern"], "134": ["n02012849", "crane"], "135": ["n02013706", "limpkin"], "136": ["n02017213", "European_gallinule"], "137": ["n02018207", "American_coot"], "138": ["n02018795", "bustard"], "139": ["n02025239", "ruddy_turnstone"], "140": ["n02027492", "red-backed_sandpiper"], "141": ["n02028035", "redshank"], "142": ["n02033041", "dowitcher"], "143": ["n02037110", "oystercatcher"], "144": ["n02051845", "pelican"], "145": ["n02056570", "king_penguin"], "146": ["n02058221", "albatross"], "147": ["n02066245", "grey_whale"], "148": ["n02071294", "killer_whale"], "149": ["n02074367", "dugong"], "150": ["n02077923", "sea_lion"], "151": ["n02085620", "Chihuahua"], "152": ["n02085782", "Japanese_spaniel"], "153": ["n02085936", "Maltese_dog"], "154": ["n02086079", "Pekinese"], "155": ["n02086240", "Shih-Tzu"], "156": ["n02086646", "Blenheim_spaniel"], "157": ["n02086910", "papillon"], "158": ["n02087046", "toy_terrier"], "159": ["n02087394", "Rhodesian_ridgeback"], "160": ["n02088094", "Afghan_hound"], "161": ["n02088238", "basset"], "162": ["n02088364", "beagle"], "163": ["n02088466", "bloodhound"], "164": ["n02088632", "bluetick"], "165": ["n02089078", "black-and-tan_coonhound"], "166": ["n02089867", "Walker_hound"], "167": ["n02089973", "English_foxhound"], "168": ["n02090379", "redbone"], "169": ["n02090622", "borzoi"], "170": ["n02090721", "Irish_wolfhound"], "171": ["n02091032", "Italian_greyhound"], "172": ["n02091134", "whippet"], "173": ["n02091244", "Ibizan_hound"], "174": ["n02091467", "Norwegian_elkhound"], "175": 
["n02091635", "otterhound"], "176": ["n02091831", "Saluki"], "177": ["n02092002", "Scottish_deerhound"], "178": ["n02092339", "Weimaraner"], "179": ["n02093256", "Staffordshire_bullterrier"], "180": ["n02093428", "American_Staffordshire_terrier"], "181": ["n02093647", "Bedlington_terrier"], "182": ["n02093754", "Border_terrier"], "183": ["n02093859", "Kerry_blue_terrier"], "184": ["n02093991", "Irish_terrier"], "185": ["n02094114", "Norfolk_terrier"], "186": ["n02094258", "Norwich_terrier"], "187": ["n02094433", "Yorkshire_terrier"], "188": ["n02095314", "wire-haired_fox_terrier"], "189": ["n02095570", "Lakeland_terrier"], "190": ["n02095889", "Sealyham_terrier"], "191": ["n02096051", "Airedale"], "192": ["n02096177", "cairn"], "193": ["n02096294", "Australian_terrier"], "194": ["n02096437", "Dandie_Dinmont"], "195": ["n02096585", "Boston_bull"], "196": ["n02097047", "miniature_schnauzer"], "197": ["n02097130", "giant_schnauzer"], "198": ["n02097209", "standard_schnauzer"], "199": ["n02097298", "Scotch_terrier"], "200": ["n02097474", "Tibetan_terrier"], "201": ["n02097658", "silky_terrier"], "202": ["n02098105", "soft-coated_wheaten_terrier"], "203": ["n02098286", "West_Highland_white_terrier"], "204": ["n02098413", "Lhasa"], "205": ["n02099267", "flat-coated_retriever"], "206": ["n02099429", "curly-coated_retriever"], "207": ["n02099601", "golden_retriever"], "208": ["n02099712", "Labrador_retriever"], "209": ["n02099849", "Chesapeake_Bay_retriever"], "210": ["n02100236", "German_short-haired_pointer"], "211": ["n02100583", "vizsla"], "212": ["n02100735", "English_setter"], "213": ["n02100877", "Irish_setter"], "214": ["n02101006", "Gordon_setter"], "215": ["n02101388", "Brittany_spaniel"], "216": ["n02101556", "clumber"], "217": ["n02102040", "English_springer"], "218": ["n02102177", "Welsh_springer_spaniel"], "219": ["n02102318", "cocker_spaniel"], "220": ["n02102480", "Sussex_spaniel"], "221": ["n02102973", "Irish_water_spaniel"], "222": ["n02104029", "kuvasz"], "223": ["n02104365", "schipperke"], "224": ["n02105056", "groenendael"], "225": ["n02105162", "malinois"], "226": ["n02105251", "briard"], "227": ["n02105412", "kelpie"], "228": ["n02105505", "komondor"], "229": ["n02105641", "Old_English_sheepdog"], "230": ["n02105855", "Shetland_sheepdog"], "231": ["n02106030", "collie"], "232": ["n02106166", "Border_collie"], "233": ["n02106382", "Bouvier_des_Flandres"], "234": ["n02106550", "Rottweiler"], "235": ["n02106662", "German_shepherd"], "236": ["n02107142", "Doberman"], "237": ["n02107312", "miniature_pinscher"], "238": ["n02107574", "Greater_Swiss_Mountain_dog"], "239": ["n02107683", "Bernese_mountain_dog"], "240": ["n02107908", "Appenzeller"], "241": ["n02108000", "EntleBucher"], "242": ["n02108089", "boxer"], "243": ["n02108422", "bull_mastiff"], "244": ["n02108551", "Tibetan_mastiff"], "245": ["n02108915", "French_bulldog"], "246": ["n02109047", "Great_Dane"], "247": ["n02109525", "Saint_Bernard"], "248": ["n02109961", "Eskimo_dog"], "249": ["n02110063", "malamute"], "250": ["n02110185", "Siberian_husky"], "251": ["n02110341", "dalmatian"], "252": ["n02110627", "affenpinscher"], "253": ["n02110806", "basenji"], "254": ["n02110958", "pug"], "255": ["n02111129", "Leonberg"], "256": ["n02111277", "Newfoundland"], "257": ["n02111500", "Great_Pyrenees"], "258": ["n02111889", "Samoyed"], "259": ["n02112018", "Pomeranian"], "260": ["n02112137", "chow"], "261": ["n02112350", "keeshond"], "262": ["n02112706", "Brabancon_griffon"], "263": ["n02113023", "Pembroke"], "264": ["n02113186", 
"Cardigan"], "265": ["n02113624", "toy_poodle"], "266": ["n02113712", "miniature_poodle"], "267": ["n02113799", "standard_poodle"], "268": ["n02113978", "Mexican_hairless"], "269": ["n02114367", "timber_wolf"], "270": ["n02114548", "white_wolf"], "271": ["n02114712", "red_wolf"], "272": ["n02114855", "coyote"], "273": ["n02115641", "dingo"], "274": ["n02115913", "dhole"], "275": ["n02116738", "African_hunting_dog"], "276": ["n02117135", "hyena"], "277": ["n02119022", "red_fox"], "278": ["n02119789", "kit_fox"], "279": ["n02120079", "Arctic_fox"], "280": ["n02120505", "grey_fox"], "281": ["n02123045", "tabby"], "282": ["n02123159", "tiger_cat"], "283": ["n02123394", "Persian_cat"], "284": ["n02123597", "Siamese_cat"], "285": ["n02124075", "Egyptian_cat"], "286": ["n02125311", "cougar"], "287": ["n02127052", "lynx"], "288": ["n02128385", "leopard"], "289": ["n02128757", "snow_leopard"], "290": ["n02128925", "jaguar"], "291": ["n02129165", "lion"], "292": ["n02129604", "tiger"], "293": ["n02130308", "cheetah"], "294": ["n02132136", "brown_bear"], "295": ["n02133161", "American_black_bear"], "296": ["n02134084", "ice_bear"], "297": ["n02134418", "sloth_bear"], "298": ["n02137549", "mongoose"], "299": ["n02138441", "meerkat"], "300": ["n02165105", "tiger_beetle"], "301": ["n02165456", "ladybug"], "302": ["n02167151", "ground_beetle"], "303": ["n02168699", "long-horned_beetle"], "304": ["n02169497", "leaf_beetle"], "305": ["n02172182", "dung_beetle"], "306": ["n02174001", "rhinoceros_beetle"], "307": ["n02177972", "weevil"], "308": ["n02190166", "fly"], "309": ["n02206856", "bee"], "310": ["n02219486", "ant"], "311": ["n02226429", "grasshopper"], "312": ["n02229544", "cricket"], "313": ["n02231487", "walking_stick"], "314": ["n02233338", "cockroach"], "315": ["n02236044", "mantis"], "316": ["n02256656", "cicada"], "317": ["n02259212", "leafhopper"], "318": ["n02264363", "lacewing"], "319": ["n02268443", "dragonfly"], "320": ["n02268853", "damselfly"], "321": ["n02276258", "admiral"], "322": ["n02277742", "ringlet"], "323": ["n02279972", "monarch"], "324": ["n02280649", "cabbage_butterfly"], "325": ["n02281406", "sulphur_butterfly"], "326": ["n02281787", "lycaenid"], "327": ["n02317335", "starfish"], "328": ["n02319095", "sea_urchin"], "329": ["n02321529", "sea_cucumber"], "330": ["n02325366", "wood_rabbit"], "331": ["n02326432", "hare"], "332": ["n02328150", "Angora"], "333": ["n02342885", "hamster"], "334": ["n02346627", "porcupine"], "335": ["n02356798", "fox_squirrel"], "336": ["n02361337", "marmot"], "337": ["n02363005", "beaver"], "338": ["n02364673", "guinea_pig"], "339": ["n02389026", "sorrel"], "340": ["n02391049", "zebra"], "341": ["n02395406", "hog"], "342": ["n02396427", "wild_boar"], "343": ["n02397096", "warthog"], "344": ["n02398521", "hippopotamus"], "345": ["n02403003", "ox"], "346": ["n02408429", "water_buffalo"], "347": ["n02410509", "bison"], "348": ["n02412080", "ram"], "349": ["n02415577", "bighorn"], "350": ["n02417914", "ibex"], "351": ["n02422106", "hartebeest"], "352": ["n02422699", "impala"], "353": ["n02423022", "gazelle"], "354": ["n02437312", "Arabian_camel"], "355": ["n02437616", "llama"], "356": ["n02441942", "weasel"], "357": ["n02442845", "mink"], "358": ["n02443114", "polecat"], "359": ["n02443484", "black-footed_ferret"], "360": ["n02444819", "otter"], "361": ["n02445715", "skunk"], "362": ["n02447366", "badger"], "363": ["n02454379", "armadillo"], "364": ["n02457408", "three-toed_sloth"], "365": ["n02480495", "orangutan"], "366": ["n02480855", "gorilla"], 
"367": ["n02481823", "chimpanzee"], "368": ["n02483362", "gibbon"], "369": ["n02483708", "siamang"], "370": ["n02484975", "guenon"], "371": ["n02486261", "patas"], "372": ["n02486410", "baboon"], "373": ["n02487347", "macaque"], "374": ["n02488291", "langur"], "375": ["n02488702", "colobus"], "376": ["n02489166", "proboscis_monkey"], "377": ["n02490219", "marmoset"], "378": ["n02492035", "capuchin"], "379": ["n02492660", "howler_monkey"], "380": ["n02493509", "titi"], "381": ["n02493793", "spider_monkey"], "382": ["n02494079", "squirrel_monkey"], "383": ["n02497673", "Madagascar_cat"], "384": ["n02500267", "indri"], "385": ["n02504013", "Indian_elephant"], "386": ["n02504458", "African_elephant"], "387": ["n02509815", "lesser_panda"], "388": ["n02510455", "giant_panda"], "389": ["n02514041", "barracouta"], "390": ["n02526121", "eel"], "391": ["n02536864", "coho"], "392": ["n02606052", "rock_beauty"], "393": ["n02607072", "anemone_fish"], "394": ["n02640242", "sturgeon"], "395": ["n02641379", "gar"], "396": ["n02643566", "lionfish"], "397": ["n02655020", "puffer"], "398": ["n02666196", "abacus"], "399": ["n02667093", "abaya"], "400": ["n02669723", "academic_gown"], "401": ["n02672831", "accordion"], "402": ["n02676566", "acoustic_guitar"], "403": ["n02687172", "aircraft_carrier"], "404": ["n02690373", "airliner"], "405": ["n02692877", "airship"], "406": ["n02699494", "altar"], "407": ["n02701002", "ambulance"], "408": ["n02704792", "amphibian"], "409": ["n02708093", "analog_clock"], "410": ["n02727426", "apiary"], "411": ["n02730930", "apron"], "412": ["n02747177", "ashcan"], "413": ["n02749479", "assault_rifle"], "414": ["n02769748", "backpack"], "415": ["n02776631", "bakery"], "416": ["n02777292", "balance_beam"], "417": ["n02782093", "balloon"], "418": ["n02783161", "ballpoint"], "419": ["n02786058", "Band_Aid"], "420": ["n02787622", "banjo"], "421": ["n02788148", "bannister"], "422": ["n02790996", "barbell"], "423": ["n02791124", "barber_chair"], "424": ["n02791270", "barbershop"], "425": ["n02793495", "barn"], "426": ["n02794156", "barometer"], "427": ["n02795169", "barrel"], "428": ["n02797295", "barrow"], "429": ["n02799071", "baseball"], "430": ["n02802426", "basketball"], "431": ["n02804414", "bassinet"], "432": ["n02804610", "bassoon"], "433": ["n02807133", "bathing_cap"], "434": ["n02808304", "bath_towel"], "435": ["n02808440", "bathtub"], "436": ["n02814533", "beach_wagon"], "437": ["n02814860", "beacon"], "438": ["n02815834", "beaker"], "439": ["n02817516", "bearskin"], "440": ["n02823428", "beer_bottle"], "441": ["n02823750", "beer_glass"], "442": ["n02825657", "bell_cote"], "443": ["n02834397", "bib"], "444": ["n02835271", "bicycle-built-for-two"], "445": ["n02837789", "bikini"], "446": ["n02840245", "binder"], "447": ["n02841315", "binoculars"], "448": ["n02843684", "birdhouse"], "449": ["n02859443", "boathouse"], "450": ["n02860847", "bobsled"], "451": ["n02865351", "bolo_tie"], "452": ["n02869837", "bonnet"], "453": ["n02870880", "bookcase"], "454": ["n02871525", "bookshop"], "455": ["n02877765", "bottlecap"], "456": ["n02879718", "bow"], "457": ["n02883205", "bow_tie"], "458": ["n02892201", "brass"], "459": ["n02892767", "brassiere"], "460": ["n02894605", "breakwater"], "461": ["n02895154", "breastplate"], "462": ["n02906734", "broom"], "463": ["n02909870", "bucket"], "464": ["n02910353", "buckle"], "465": ["n02916936", "bulletproof_vest"], "466": ["n02917067", "bullet_train"], "467": ["n02927161", "butcher_shop"], "468": ["n02930766", "cab"], "469": ["n02939185", 
"caldron"], "470": ["n02948072", "candle"], "471": ["n02950826", "cannon"], "472": ["n02951358", "canoe"], "473": ["n02951585", "can_opener"], "474": ["n02963159", "cardigan"], "475": ["n02965783", "car_mirror"], "476": ["n02966193", "carousel"], "477": ["n02966687", "carpenter's_kit"], "478": ["n02971356", "carton"], "479": ["n02974003", "car_wheel"], "480": ["n02977058", "cash_machine"], "481": ["n02978881", "cassette"], "482": ["n02979186", "cassette_player"], "483": ["n02980441", "castle"], "484": ["n02981792", "catamaran"], "485": ["n02988304", "CD_player"], "486": ["n02992211", "cello"], "487": ["n02992529", "cellular_telephone"], "488": ["n02999410", "chain"], "489": ["n03000134", "chainlink_fence"], "490": ["n03000247", "chain_mail"], "491": ["n03000684", "chain_saw"], "492": ["n03014705", "chest"], "493": ["n03016953", "chiffonier"], "494": ["n03017168", "chime"], "495": ["n03018349", "china_cabinet"], "496": ["n03026506", "Christmas_stocking"], "497": ["n03028079", "church"], "498": ["n03032252", "cinema"], "499": ["n03041632", "cleaver"], "500": ["n03042490", "cliff_dwelling"], "501": ["n03045698", "cloak"], "502": ["n03047690", "clog"], "503": ["n03062245", "cocktail_shaker"], "504": ["n03063599", "coffee_mug"], "505": ["n03063689", "coffeepot"], "506": ["n03065424", "coil"], "507": ["n03075370", "combination_lock"], "508": ["n03085013", "computer_keyboard"], "509": ["n03089624", "confectionery"], "510": ["n03095699", "container_ship"], "511": ["n03100240", "convertible"], "512": ["n03109150", "corkscrew"], "513": ["n03110669", "cornet"], "514": ["n03124043", "cowboy_boot"], "515": ["n03124170", "cowboy_hat"], "516": ["n03125729", "cradle"], "517": ["n03126707", "crane"], "518": ["n03127747", "crash_helmet"], "519": ["n03127925", "crate"], "520": ["n03131574", "crib"], "521": ["n03133878", "Crock_Pot"], "522": ["n03134739", "croquet_ball"], "523": ["n03141823", "crutch"], "524": ["n03146219", "cuirass"], "525": ["n03160309", "dam"], "526": ["n03179701", "desk"], "527": ["n03180011", "desktop_computer"], "528": ["n03187595", "dial_telephone"], "529": ["n03188531", "diaper"], "530": ["n03196217", "digital_clock"], "531": ["n03197337", "digital_watch"], "532": ["n03201208", "dining_table"], "533": ["n03207743", "dishrag"], "534": ["n03207941", "dishwasher"], "535": ["n03208938", "disk_brake"], "536": ["n03216828", "dock"], "537": ["n03218198", "dogsled"], "538": ["n03220513", "dome"], "539": ["n03223299", "doormat"], "540": ["n03240683", "drilling_platform"], "541": ["n03249569", "drum"], "542": ["n03250847", "drumstick"], "543": ["n03255030", "dumbbell"], "544": ["n03259280", "Dutch_oven"], "545": ["n03271574", "electric_fan"], "546": ["n03272010", "electric_guitar"], "547": ["n03272562", "electric_locomotive"], "548": ["n03290653", "entertainment_center"], "549": ["n03291819", "envelope"], "550": ["n03297495", "espresso_maker"], "551": ["n03314780", "face_powder"], "552": ["n03325584", "feather_boa"], "553": ["n03337140", "file"], "554": ["n03344393", "fireboat"], "555": ["n03345487", "fire_engine"], "556": ["n03347037", "fire_screen"], "557": ["n03355925", "flagpole"], "558": ["n03372029", "flute"], "559": ["n03376595", "folding_chair"], "560": ["n03379051", "football_helmet"], "561": ["n03384352", "forklift"], "562": ["n03388043", "fountain"], "563": ["n03388183", "fountain_pen"], "564": ["n03388549", "four-poster"], "565": ["n03393912", "freight_car"], "566": ["n03394916", "French_horn"], "567": ["n03400231", "frying_pan"], "568": ["n03404251", "fur_coat"], "569": 
["n03417042", "garbage_truck"], "570": ["n03424325", "gasmask"], "571": ["n03425413", "gas_pump"], "572": ["n03443371", "goblet"], "573": ["n03444034", "go-kart"], "574": ["n03445777", "golf_ball"], "575": ["n03445924", "golfcart"], "576": ["n03447447", "gondola"], "577": ["n03447721", "gong"], "578": ["n03450230", "gown"], "579": ["n03452741", "grand_piano"], "580": ["n03457902", "greenhouse"], "581": ["n03459775", "grille"], "582": ["n03461385", "grocery_store"], "583": ["n03467068", "guillotine"], "584": ["n03476684", "hair_slide"], "585": ["n03476991", "hair_spray"], "586": ["n03478589", "half_track"], "587": ["n03481172", "hammer"], "588": ["n03482405", "hamper"], "589": ["n03483316", "hand_blower"], "590": ["n03485407", "hand-held_computer"], "591": ["n03485794", "handkerchief"], "592": ["n03492542", "hard_disc"], "593": ["n03494278", "harmonica"], "594": ["n03495258", "harp"], "595": ["n03496892", "harvester"], "596": ["n03498962", "hatchet"], "597": ["n03527444", "holster"], "598": ["n03529860", "home_theater"], "599": ["n03530642", "honeycomb"], "600": ["n03532672", "hook"], "601": ["n03534580", "hoopskirt"], "602": ["n03535780", "horizontal_bar"], "603": ["n03538406", "horse_cart"], "604": ["n03544143", "hourglass"], "605": ["n03584254", "iPod"], "606": ["n03584829", "iron"], "607": ["n03590841", "jack-o'-lantern"], "608": ["n03594734", "jean"], "609": ["n03594945", "jeep"], "610": ["n03595614", "jersey"], "611": ["n03598930", "jigsaw_puzzle"], "612": ["n03599486", "jinrikisha"], "613": ["n03602883", "joystick"], "614": ["n03617480", "kimono"], "615": ["n03623198", "knee_pad"], "616": ["n03627232", "knot"], "617": ["n03630383", "lab_coat"], "618": ["n03633091", "ladle"], "619": ["n03637318", "lampshade"], "620": ["n03642806", "laptop"], "621": ["n03649909", "lawn_mower"], "622": ["n03657121", "lens_cap"], "623": ["n03658185", "letter_opener"], "624": ["n03661043", "library"], "625": ["n03662601", "lifeboat"], "626": ["n03666591", "lighter"], "627": ["n03670208", "limousine"], "628": ["n03673027", "liner"], "629": ["n03676483", "lipstick"], "630": ["n03680355", "Loafer"], "631": ["n03690938", "lotion"], "632": ["n03691459", "loudspeaker"], "633": ["n03692522", "loupe"], "634": ["n03697007", "lumbermill"], "635": ["n03706229", "magnetic_compass"], "636": ["n03709823", "mailbag"], "637": ["n03710193", "mailbox"], "638": ["n03710637", "maillot"], "639": ["n03710721", "maillot"], "640": ["n03717622", "manhole_cover"], "641": ["n03720891", "maraca"], "642": ["n03721384", "marimba"], "643": ["n03724870", "mask"], "644": ["n03729826", "matchstick"], "645": ["n03733131", "maypole"], "646": ["n03733281", "maze"], "647": ["n03733805", "measuring_cup"], "648": ["n03742115", "medicine_chest"], "649": ["n03743016", "megalith"], "650": ["n03759954", "microphone"], "651": ["n03761084", "microwave"], "652": ["n03763968", "military_uniform"], "653": ["n03764736", "milk_can"], "654": ["n03769881", "minibus"], "655": ["n03770439", "miniskirt"], "656": ["n03770679", "minivan"], "657": ["n03773504", "missile"], "658": ["n03775071", "mitten"], "659": ["n03775546", "mixing_bowl"], "660": ["n03776460", "mobile_home"], "661": ["n03777568", "Model_T"], "662": ["n03777754", "modem"], "663": ["n03781244", "monastery"], "664": ["n03782006", "monitor"], "665": ["n03785016", "moped"], "666": ["n03786901", "mortar"], "667": ["n03787032", "mortarboard"], "668": ["n03788195", "mosque"], "669": ["n03788365", "mosquito_net"], "670": ["n03791053", "motor_scooter"], "671": ["n03792782", "mountain_bike"], "672": 
["n03792972", "mountain_tent"], "673": ["n03793489", "mouse"], "674": ["n03794056", "mousetrap"], "675": ["n03796401", "moving_van"], "676": ["n03803284", "muzzle"], "677": ["n03804744", "nail"], "678": ["n03814639", "neck_brace"], "679": ["n03814906", "necklace"], "680": ["n03825788", "nipple"], "681": ["n03832673", "notebook"], "682": ["n03837869", "obelisk"], "683": ["n03838899", "oboe"], "684": ["n03840681", "ocarina"], "685": ["n03841143", "odometer"], "686": ["n03843555", "oil_filter"], "687": ["n03854065", "organ"], "688": ["n03857828", "oscilloscope"], "689": ["n03866082", "overskirt"], "690": ["n03868242", "oxcart"], "691": ["n03868863", "oxygen_mask"], "692": ["n03871628", "packet"], "693": ["n03873416", "paddle"], "694": ["n03874293", "paddlewheel"], "695": ["n03874599", "padlock"], "696": ["n03876231", "paintbrush"], "697": ["n03877472", "pajama"], "698": ["n03877845", "palace"], "699": ["n03884397", "panpipe"], "700": ["n03887697", "paper_towel"], "701": ["n03888257", "parachute"], "702": ["n03888605", "parallel_bars"], "703": ["n03891251", "park_bench"], "704": ["n03891332", "parking_meter"], "705": ["n03895866", "passenger_car"], "706": ["n03899768", "patio"], "707": ["n03902125", "pay-phone"], "708": ["n03903868", "pedestal"], "709": ["n03908618", "pencil_box"], "710": ["n03908714", "pencil_sharpener"], "711": ["n03916031", "perfume"], "712": ["n03920288", "Petri_dish"], "713": ["n03924679", "photocopier"], "714": ["n03929660", "pick"], "715": ["n03929855", "pickelhaube"], "716": ["n03930313", "picket_fence"], "717": ["n03930630", "pickup"], "718": ["n03933933", "pier"], "719": ["n03935335", "piggy_bank"], "720": ["n03937543", "pill_bottle"], "721": ["n03938244", "pillow"], "722": ["n03942813", "ping-pong_ball"], "723": ["n03944341", "pinwheel"], "724": ["n03947888", "pirate"], "725": ["n03950228", "pitcher"], "726": ["n03954731", "plane"], "727": ["n03956157", "planetarium"], "728": ["n03958227", "plastic_bag"], "729": ["n03961711", "plate_rack"], "730": ["n03967562", "plow"], "731": ["n03970156", "plunger"], "732": ["n03976467", "Polaroid_camera"], "733": ["n03976657", "pole"], "734": ["n03977966", "police_van"], "735": ["n03980874", "poncho"], "736": ["n03982430", "pool_table"], "737": ["n03983396", "pop_bottle"], "738": ["n03991062", "pot"], "739": ["n03992509", "potter's_wheel"], "740": ["n03995372", "power_drill"], "741": ["n03998194", "prayer_rug"], "742": ["n04004767", "printer"], "743": ["n04005630", "prison"], "744": ["n04008634", "projectile"], "745": ["n04009552", "projector"], "746": ["n04019541", "puck"], "747": ["n04023962", "punching_bag"], "748": ["n04026417", "purse"], "749": ["n04033901", "quill"], "750": ["n04033995", "quilt"], "751": ["n04037443", "racer"], "752": ["n04039381", "racket"], "753": ["n04040759", "radiator"], "754": ["n04041544", "radio"], "755": ["n04044716", "radio_telescope"], "756": ["n04049303", "rain_barrel"], "757": ["n04065272", "recreational_vehicle"], "758": ["n04067472", "reel"], "759": ["n04069434", "reflex_camera"], "760": ["n04070727", "refrigerator"], "761": ["n04074963", "remote_control"], "762": ["n04081281", "restaurant"], "763": ["n04086273", "revolver"], "764": ["n04090263", "rifle"], "765": ["n04099969", "rocking_chair"], "766": ["n04111531", "rotisserie"], "767": ["n04116512", "rubber_eraser"], "768": ["n04118538", "rugby_ball"], "769": ["n04118776", "rule"], "770": ["n04120489", "running_shoe"], "771": ["n04125021", "safe"], "772": ["n04127249", "safety_pin"], "773": ["n04131690", "saltshaker"], "774": ["n04133789", 
"sandal"], "775": ["n04136333", "sarong"], "776": ["n04141076", "sax"], "777": ["n04141327", "scabbard"], "778": ["n04141975", "scale"], "779": ["n04146614", "school_bus"], "780": ["n04147183", "schooner"], "781": ["n04149813", "scoreboard"], "782": ["n04152593", "screen"], "783": ["n04153751", "screw"], "784": ["n04154565", "screwdriver"], "785": ["n04162706", "seat_belt"], "786": ["n04179913", "sewing_machine"], "787": ["n04192698", "shield"], "788": ["n04200800", "shoe_shop"], "789": ["n04201297", "shoji"], "790": ["n04204238", "shopping_basket"], "791": ["n04204347", "shopping_cart"], "792": ["n04208210", "shovel"], "793": ["n04209133", "shower_cap"], "794": ["n04209239", "shower_curtain"], "795": ["n04228054", "ski"], "796": ["n04229816", "ski_mask"], "797": ["n04235860", "sleeping_bag"], "798": ["n04238763", "slide_rule"], "799": ["n04239074", "sliding_door"], "800": ["n04243546", "slot"], "801": ["n04251144", "snorkel"], "802": ["n04252077", "snowmobile"], "803": ["n04252225", "snowplow"], "804": ["n04254120", "soap_dispenser"], "805": ["n04254680", "soccer_ball"], "806": ["n04254777", "sock"], "807": ["n04258138", "solar_dish"], "808": ["n04259630", "sombrero"], "809": ["n04263257", "soup_bowl"], "810": ["n04264628", "space_bar"], "811": ["n04265275", "space_heater"], "812": ["n04266014", "space_shuttle"], "813": ["n04270147", "spatula"], "814": ["n04273569", "speedboat"], "815": ["n04275548", "spider_web"], "816": ["n04277352", "spindle"], "817": ["n04285008", "sports_car"], "818": ["n04286575", "spotlight"], "819": ["n04296562", "stage"], "820": ["n04310018", "steam_locomotive"], "821": ["n04311004", "steel_arch_bridge"], "822": ["n04311174", "steel_drum"], "823": ["n04317175", "stethoscope"], "824": ["n04325704", "stole"], "825": ["n04326547", "stone_wall"], "826": ["n04328186", "stopwatch"], "827": ["n04330267", "stove"], "828": ["n04332243", "strainer"], "829": ["n04335435", "streetcar"], "830": ["n04336792", "stretcher"], "831": ["n04344873", "studio_couch"], "832": ["n04346328", "stupa"], "833": ["n04347754", "submarine"], "834": ["n04350905", "suit"], "835": ["n04355338", "sundial"], "836": ["n04355933", "sunglass"], "837": ["n04356056", "sunglasses"], "838": ["n04357314", "sunscreen"], "839": ["n04366367", "suspension_bridge"], "840": ["n04367480", "swab"], "841": ["n04370456", "sweatshirt"], "842": ["n04371430", "swimming_trunks"], "843": ["n04371774", "swing"], "844": ["n04372370", "switch"], "845": ["n04376876", "syringe"], "846": ["n04380533", "table_lamp"], "847": ["n04389033", "tank"], "848": ["n04392985", "tape_player"], "849": ["n04398044", "teapot"], "850": ["n04399382", "teddy"], "851": ["n04404412", "television"], "852": ["n04409515", "tennis_ball"], "853": ["n04417672", "thatch"], "854": ["n04418357", "theater_curtain"], "855": ["n04423845", "thimble"], "856": ["n04428191", "thresher"], "857": ["n04429376", "throne"], "858": ["n04435653", "tile_roof"], "859": ["n04442312", "toaster"], "860": ["n04443257", "tobacco_shop"], "861": ["n04447861", "toilet_seat"], "862": ["n04456115", "torch"], "863": ["n04458633", "totem_pole"], "864": ["n04461696", "tow_truck"], "865": ["n04462240", "toyshop"], "866": ["n04465501", "tractor"], "867": ["n04467665", "trailer_truck"], "868": ["n04476259", "tray"], "869": ["n04479046", "trench_coat"], "870": ["n04482393", "tricycle"], "871": ["n04483307", "trimaran"], "872": ["n04485082", "tripod"], "873": ["n04486054", "triumphal_arch"], "874": ["n04487081", "trolleybus"], "875": ["n04487394", "trombone"], "876": ["n04493381", "tub"], 
"877": ["n04501370", "turnstile"], "878": ["n04505470", "typewriter_keyboard"], "879": ["n04507155", "umbrella"], "880": ["n04509417", "unicycle"], "881": ["n04515003", "upright"], "882": ["n04517823", "vacuum"], "883": ["n04522168", "vase"], "884": ["n04523525", "vault"], "885": ["n04525038", "velvet"], "886": ["n04525305", "vending_machine"], "887": ["n04532106", "vestment"], "888": ["n04532670", "viaduct"], "889": ["n04536866", "violin"], "890": ["n04540053", "volleyball"], "891": ["n04542943", "waffle_iron"], "892": ["n04548280", "wall_clock"], "893": ["n04548362", "wallet"], "894": ["n04550184", "wardrobe"], "895": ["n04552348", "warplane"], "896": ["n04553703", "washbasin"], "897": ["n04554684", "washer"], "898": ["n04557648", "water_bottle"], "899": ["n04560804", "water_jug"], "900": ["n04562935", "water_tower"], "901": ["n04579145", "whiskey_jug"], "902": ["n04579432", "whistle"], "903": ["n04584207", "wig"], "904": ["n04589890", "window_screen"], "905": ["n04590129", "window_shade"], "906": ["n04591157", "Windsor_tie"], "907": ["n04591713", "wine_bottle"], "908": ["n04592741", "wing"], "909": ["n04596742", "wok"], "910": ["n04597913", "wooden_spoon"], "911": ["n04599235", "wool"], "912": ["n04604644", "worm_fence"], "913": ["n04606251", "wreck"], "914": ["n04612504", "yawl"], "915": ["n04613696", "yurt"], "916": ["n06359193", "web_site"], "917": ["n06596364", "comic_book"], "918": ["n06785654", "crossword_puzzle"], "919": ["n06794110", "street_sign"], "920": ["n06874185", "traffic_light"], "921": ["n07248320", "book_jacket"], "922": ["n07565083", "menu"], "923": ["n07579787", "plate"], "924": ["n07583066", "guacamole"], "925": ["n07584110", "consomme"], "926": ["n07590611", "hot_pot"], "927": ["n07613480", "trifle"], "928": ["n07614500", "ice_cream"], "929": ["n07615774", "ice_lolly"], "930": ["n07684084", "French_loaf"], "931": ["n07693725", "bagel"], "932": ["n07695742", "pretzel"], "933": ["n07697313", "cheeseburger"], "934": ["n07697537", "hotdog"], "935": ["n07711569", "mashed_potato"], "936": ["n07714571", "head_cabbage"], "937": ["n07714990", "broccoli"], "938": ["n07715103", "cauliflower"], "939": ["n07716358", "zucchini"], "940": ["n07716906", "spaghetti_squash"], "941": ["n07717410", "acorn_squash"], "942": ["n07717556", "butternut_squash"], "943": ["n07718472", "cucumber"], "944": ["n07718747", "artichoke"], "945": ["n07720875", "bell_pepper"], "946": ["n07730033", "cardoon"], "947": ["n07734744", "mushroom"], "948": ["n07742313", "Granny_Smith"], "949": ["n07745940", "strawberry"], "950": ["n07747607", "orange"], "951": ["n07749582", "lemon"], "952": ["n07753113", "fig"], "953": ["n07753275", "pineapple"], "954": ["n07753592", "banana"], "955": ["n07754684", "jackfruit"], "956": ["n07760859", "custard_apple"], "957": ["n07768694", "pomegranate"], "958": ["n07802026", "hay"], "959": ["n07831146", "carbonara"], "960": ["n07836838", "chocolate_sauce"], "961": ["n07860988", "dough"], "962": ["n07871810", "meat_loaf"], "963": ["n07873807", "pizza"], "964": ["n07875152", "potpie"], "965": ["n07880968", "burrito"], "966": ["n07892512", "red_wine"], "967": ["n07920052", "espresso"], "968": ["n07930864", "cup"], "969": ["n07932039", "eggnog"], "970": ["n09193705", "alp"], "971": ["n09229709", "bubble"], "972": ["n09246464", "cliff"], "973": ["n09256479", "coral_reef"], "974": ["n09288635", "geyser"], "975": ["n09332890", "lakeside"], "976": ["n09399592", "promontory"], "977": ["n09421951", "sandbar"], "978": ["n09428293", "seashore"], "979": ["n09468604", "valley"], "980": 
["n09472597", "volcano"], "981": ["n09835506", "ballplayer"], "982": ["n10148035", "groom"], "983": ["n10565667", "scuba_diver"], "984": ["n11879895", "rapeseed"], "985": ["n11939491", "daisy"], "986": ["n12057211", "yellow_lady's_slipper"], "987": ["n12144580", "corn"], "988": ["n12267677", "acorn"], "989": ["n12620546", "hip"], "990": ["n12768682", "buckeye"], "991": ["n12985857", "coral_fungus"], "992": ["n12998815", "agaric"], "993": ["n13037406", "gyromitra"], "994": ["n13040303", "stinkhorn"], "995": ["n13044778", "earthstar"], "996": ["n13052670", "hen-of-the-woods"], "997": ["n13054560", "bolete"], "998": ["n13133613", "ear"], "999": ["n15075141", "toilet_tissue"]} \ No newline at end of file diff --git a/examples/pretrained_cnn/data/imagenet_classes.py b/examples/pretrained_cnn/data/imagenet_classes.py deleted file mode 100644 index d721acd45..000000000 --- a/examples/pretrained_cnn/data/imagenet_classes.py +++ /dev/null @@ -1,1000 +0,0 @@ -class_names = '''tench, Tinca tinca -goldfish, Carassius auratus -great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias -tiger shark, Galeocerdo cuvieri -hammerhead, hammerhead shark -electric ray, crampfish, numbfish, torpedo -stingray -cock -hen -ostrich, Struthio camelus -brambling, Fringilla montifringilla -goldfinch, Carduelis carduelis -house finch, linnet, Carpodacus mexicanus -junco, snowbird -indigo bunting, indigo finch, indigo bird, Passerina cyanea -robin, American robin, Turdus migratorius -bulbul -jay -magpie -chickadee -water ouzel, dipper -kite -bald eagle, American eagle, Haliaeetus leucocephalus -vulture -great grey owl, great gray owl, Strix nebulosa -European fire salamander, Salamandra salamandra -common newt, Triturus vulgaris -eft -spotted salamander, Ambystoma maculatum -axolotl, mud puppy, Ambystoma mexicanum -bullfrog, Rana catesbeiana -tree frog, tree-frog -tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui -loggerhead, loggerhead turtle, Caretta caretta -leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea -mud turtle -terrapin -box turtle, box tortoise -banded gecko -common iguana, iguana, Iguana iguana -American chameleon, anole, Anolis carolinensis -whiptail, whiptail lizard -agama -frilled lizard, Chlamydosaurus kingi -alligator lizard -Gila monster, Heloderma suspectum -green lizard, Lacerta viridis -African chameleon, Chamaeleo chamaeleon -Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis -African crocodile, Nile crocodile, Crocodylus niloticus -American alligator, Alligator mississipiensis -triceratops -thunder snake, worm snake, Carphophis amoenus -ringneck snake, ring-necked snake, ring snake -hognose snake, puff adder, sand viper -green snake, grass snake -king snake, kingsnake -garter snake, grass snake -water snake -vine snake -night snake, Hypsiglena torquata -boa constrictor, Constrictor constrictor -rock python, rock snake, Python sebae -Indian cobra, Naja naja -green mamba -sea snake -horned viper, cerastes, sand viper, horned asp, Cerastes cornutus -diamondback, diamondback rattlesnake, Crotalus adamanteus -sidewinder, horned rattlesnake, Crotalus cerastes -trilobite -harvestman, daddy longlegs, Phalangium opilio -scorpion -black and gold garden spider, Argiope aurantia -barn spider, Araneus cavaticus -garden spider, Aranea diademata -black widow, Latrodectus mactans -tarantula -wolf spider, hunting spider -tick -centipede -black grouse -ptarmigan -ruffed grouse, partridge, Bonasa umbellus -prairie chicken, 
prairie grouse, prairie fowl -peacock -quail -partridge -African grey, African gray, Psittacus erithacus -macaw -sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita -lorikeet -coucal -bee eater -hornbill -hummingbird -jacamar -toucan -drake -red-breasted merganser, Mergus serrator -goose -black swan, Cygnus atratus -tusker -echidna, spiny anteater, anteater -platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus -wallaby, brush kangaroo -koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus -wombat -jellyfish -sea anemone, anemone -brain coral -flatworm, platyhelminth -nematode, nematode worm, roundworm -conch -snail -slug -sea slug, nudibranch -chiton, coat-of-mail shell, sea cradle, polyplacophore -chambered nautilus, pearly nautilus, nautilus -Dungeness crab, Cancer magister -rock crab, Cancer irroratus -fiddler crab -king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica -American lobster, Northern lobster, Maine lobster, Homarus americanus -spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish -crayfish, crawfish, crawdad, crawdaddy -hermit crab -isopod -white stork, Ciconia ciconia -black stork, Ciconia nigra -spoonbill -flamingo -little blue heron, Egretta caerulea -American egret, great white heron, Egretta albus -bittern -crane -limpkin, Aramus pictus -European gallinule, Porphyrio porphyrio -American coot, marsh hen, mud hen, water hen, Fulica americana -bustard -ruddy turnstone, Arenaria interpres -red-backed sandpiper, dunlin, Erolia alpina -redshank, Tringa totanus -dowitcher -oystercatcher, oyster catcher -pelican -king penguin, Aptenodytes patagonica -albatross, mollymawk -grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus -killer whale, killer, orca, grampus, sea wolf, Orcinus orca -dugong, Dugong dugon -sea lion -Chihuahua -Japanese spaniel -Maltese dog, Maltese terrier, Maltese -Pekinese, Pekingese, Peke -Shih-Tzu -Blenheim spaniel -papillon -toy terrier -Rhodesian ridgeback -Afghan hound, Afghan -basset, basset hound -beagle -bloodhound, sleuthhound -bluetick -black-and-tan coonhound -Walker hound, Walker foxhound -English foxhound -redbone -borzoi, Russian wolfhound -Irish wolfhound -Italian greyhound -whippet -Ibizan hound, Ibizan Podenco -Norwegian elkhound, elkhound -otterhound, otter hound -Saluki, gazelle hound -Scottish deerhound, deerhound -Weimaraner -Staffordshire bullterrier, Staffordshire bull terrier -American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier -Bedlington terrier -Border terrier -Kerry blue terrier -Irish terrier -Norfolk terrier -Norwich terrier -Yorkshire terrier -wire-haired fox terrier -Lakeland terrier -Sealyham terrier, Sealyham -Airedale, Airedale terrier -cairn, cairn terrier -Australian terrier -Dandie Dinmont, Dandie Dinmont terrier -Boston bull, Boston terrier -miniature schnauzer -giant schnauzer -standard schnauzer -Scotch terrier, Scottish terrier, Scottie -Tibetan terrier, chrysanthemum dog -silky terrier, Sydney silky -soft-coated wheaten terrier -West Highland white terrier -Lhasa, Lhasa apso -flat-coated retriever -curly-coated retriever -golden retriever -Labrador retriever -Chesapeake Bay retriever -German short-haired pointer -vizsla, Hungarian pointer -English setter -Irish setter, red setter -Gordon setter -Brittany spaniel -clumber, clumber spaniel -English springer, English springer spaniel -Welsh springer spaniel -cocker spaniel, English 
cocker spaniel, cocker -Sussex spaniel -Irish water spaniel -kuvasz -schipperke -groenendael -malinois -briard -kelpie -komondor -Old English sheepdog, bobtail -Shetland sheepdog, Shetland sheep dog, Shetland -collie -Border collie -Bouvier des Flandres, Bouviers des Flandres -Rottweiler -German shepherd, German shepherd dog, German police dog, alsatian -Doberman, Doberman pinscher -miniature pinscher -Greater Swiss Mountain dog -Bernese mountain dog -Appenzeller -EntleBucher -boxer -bull mastiff -Tibetan mastiff -French bulldog -Great Dane -Saint Bernard, St Bernard -Eskimo dog, husky -malamute, malemute, Alaskan malamute -Siberian husky -dalmatian, coach dog, carriage dog -affenpinscher, monkey pinscher, monkey dog -basenji -pug, pug-dog -Leonberg -Newfoundland, Newfoundland dog -Great Pyrenees -Samoyed, Samoyede -Pomeranian -chow, chow chow -keeshond -Brabancon griffon -Pembroke, Pembroke Welsh corgi -Cardigan, Cardigan Welsh corgi -toy poodle -miniature poodle -standard poodle -Mexican hairless -timber wolf, grey wolf, gray wolf, Canis lupus -white wolf, Arctic wolf, Canis lupus tundrarum -red wolf, maned wolf, Canis rufus, Canis niger -coyote, prairie wolf, brush wolf, Canis latrans -dingo, warrigal, warragal, Canis dingo -dhole, Cuon alpinus -African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus -hyena, hyaena -red fox, Vulpes vulpes -kit fox, Vulpes macrotis -Arctic fox, white fox, Alopex lagopus -grey fox, gray fox, Urocyon cinereoargenteus -tabby, tabby cat -tiger cat -Persian cat -Siamese cat, Siamese -Egyptian cat -cougar, puma, catamount, mountain lion, painter, panther, Felis concolor -lynx, catamount -leopard, Panthera pardus -snow leopard, ounce, Panthera uncia -jaguar, panther, Panthera onca, Felis onca -lion, king of beasts, Panthera leo -tiger, Panthera tigris -cheetah, chetah, Acinonyx jubatus -brown bear, bruin, Ursus arctos -American black bear, black bear, Ursus americanus, Euarctos americanus -ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus -sloth bear, Melursus ursinus, Ursus ursinus -mongoose -meerkat, mierkat -tiger beetle -ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle -ground beetle, carabid beetle -long-horned beetle, longicorn, longicorn beetle -leaf beetle, chrysomelid -dung beetle -rhinoceros beetle -weevil -fly -bee -ant, emmet, pismire -grasshopper, hopper -cricket -walking stick, walkingstick, stick insect -cockroach, roach -mantis, mantid -cicada, cicala -leafhopper -lacewing, lacewing fly -dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk -damselfly -admiral -ringlet, ringlet butterfly -monarch, monarch butterfly, milkweed butterfly, Danaus plexippus -cabbage butterfly -sulphur butterfly, sulfur butterfly -lycaenid, lycaenid butterfly -starfish, sea star -sea urchin -sea cucumber, holothurian -wood rabbit, cottontail, cottontail rabbit -hare -Angora, Angora rabbit -hamster -porcupine, hedgehog -fox squirrel, eastern fox squirrel, Sciurus niger -marmot -beaver -guinea pig, Cavia cobaya -sorrel -zebra -hog, pig, grunter, squealer, Sus scrofa -wild boar, boar, Sus scrofa -warthog -hippopotamus, hippo, river horse, Hippopotamus amphibius -ox -water buffalo, water ox, Asiatic buffalo, Bubalus bubalis -bison -ram, tup -bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis -ibex, Capra ibex -hartebeest -impala, Aepyceros melampus -gazelle -Arabian camel, dromedary, Camelus dromedarius -llama -weasel -mink 
-polecat, fitch, foulmart, foumart, Mustela putorius -black-footed ferret, ferret, Mustela nigripes -otter -skunk, polecat, wood pussy -badger -armadillo -three-toed sloth, ai, Bradypus tridactylus -orangutan, orang, orangutang, Pongo pygmaeus -gorilla, Gorilla gorilla -chimpanzee, chimp, Pan troglodytes -gibbon, Hylobates lar -siamang, Hylobates syndactylus, Symphalangus syndactylus -guenon, guenon monkey -patas, hussar monkey, Erythrocebus patas -baboon -macaque -langur -colobus, colobus monkey -proboscis monkey, Nasalis larvatus -marmoset -capuchin, ringtail, Cebus capucinus -howler monkey, howler -titi, titi monkey -spider monkey, Ateles geoffroyi -squirrel monkey, Saimiri sciureus -Madagascar cat, ring-tailed lemur, Lemur catta -indri, indris, Indri indri, Indri brevicaudatus -Indian elephant, Elephas maximus -African elephant, Loxodonta africana -lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens -giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca -barracouta, snoek -eel -coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch -rock beauty, Holocanthus tricolor -anemone fish -sturgeon -gar, garfish, garpike, billfish, Lepisosteus osseus -lionfish -puffer, pufferfish, blowfish, globefish -abacus -abaya -academic gown, academic robe, judge's robe -accordion, piano accordion, squeeze box -acoustic guitar -aircraft carrier, carrier, flattop, attack aircraft carrier -airliner -airship, dirigible -altar -ambulance -amphibian, amphibious vehicle -analog clock -apiary, bee house -apron -ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin -assault rifle, assault gun -backpack, back pack, knapsack, packsack, rucksack, haversack -bakery, bakeshop, bakehouse -balance beam, beam -balloon -ballpoint, ballpoint pen, ballpen, Biro -Band Aid -banjo -bannister, banister, balustrade, balusters, handrail -barbell -barber chair -barbershop -barn -barometer -barrel, cask -barrow, garden cart, lawn cart, wheelbarrow -baseball -basketball -bassinet -bassoon -bathing cap, swimming cap -bath towel -bathtub, bathing tub, bath, tub -beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon -beacon, lighthouse, beacon light, pharos -beaker -bearskin, busby, shako -beer bottle -beer glass -bell cote, bell cot -bib -bicycle-built-for-two, tandem bicycle, tandem -bikini, two-piece -binder, ring-binder -binoculars, field glasses, opera glasses -birdhouse -boathouse -bobsled, bobsleigh, bob -bolo tie, bolo, bola tie, bola -bonnet, poke bonnet -bookcase -bookshop, bookstore, bookstall -bottlecap -bow -bow tie, bow-tie, bowtie -brass, memorial tablet, plaque -brassiere, bra, bandeau -breakwater, groin, groyne, mole, bulwark, seawall, jetty -breastplate, aegis, egis -broom -bucket, pail -buckle -bulletproof vest -bullet train, bullet -butcher shop, meat market -cab, hack, taxi, taxicab -caldron, cauldron -candle, taper, wax light -cannon -canoe -can opener, tin opener -cardigan -car mirror -carousel, carrousel, merry-go-round, roundabout, whirligig -carpenter's kit, tool kit -carton -car wheel -cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM -cassette -cassette player -castle -catamaran -CD player -cello, violoncello -cellular telephone, cellular phone, cellphone, cell, mobile phone -chain -chainlink fence -chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour -chain saw, chainsaw -chest -chiffonier, 
commode -chime, bell, gong -china cabinet, china closet -Christmas stocking -church, church building -cinema, movie theater, movie theatre, movie house, picture palace -cleaver, meat cleaver, chopper -cliff dwelling -cloak -clog, geta, patten, sabot -cocktail shaker -coffee mug -coffeepot -coil, spiral, volute, whorl, helix -combination lock -computer keyboard, keypad -confectionery, confectionary, candy store -container ship, containership, container vessel -convertible -corkscrew, bottle screw -cornet, horn, trumpet, trump -cowboy boot -cowboy hat, ten-gallon hat -cradle -crane -crash helmet -crate -crib, cot -Crock Pot -croquet ball -crutch -cuirass -dam, dike, dyke -desk -desktop computer -dial telephone, dial phone -diaper, nappy, napkin -digital clock -digital watch -dining table, board -dishrag, dishcloth -dishwasher, dish washer, dishwashing machine -disk brake, disc brake -dock, dockage, docking facility -dogsled, dog sled, dog sleigh -dome -doormat, welcome mat -drilling platform, offshore rig -drum, membranophone, tympan -drumstick -dumbbell -Dutch oven -electric fan, blower -electric guitar -electric locomotive -entertainment center -envelope -espresso maker -face powder -feather boa, boa -file, file cabinet, filing cabinet -fireboat -fire engine, fire truck -fire screen, fireguard -flagpole, flagstaff -flute, transverse flute -folding chair -football helmet -forklift -fountain -fountain pen -four-poster -freight car -French horn, horn -frying pan, frypan, skillet -fur coat -garbage truck, dustcart -gasmask, respirator, gas helmet -gas pump, gasoline pump, petrol pump, island dispenser -goblet -go-kart -golf ball -golfcart, golf cart -gondola -gong, tam-tam -gown -grand piano, grand -greenhouse, nursery, glasshouse -grille, radiator grille -grocery store, grocery, food market, market -guillotine -hair slide -hair spray -half track -hammer -hamper -hand blower, blow dryer, blow drier, hair dryer, hair drier -hand-held computer, hand-held microcomputer -handkerchief, hankie, hanky, hankey -hard disc, hard disk, fixed disk -harmonica, mouth organ, harp, mouth harp -harp -harvester, reaper -hatchet -holster -home theater, home theatre -honeycomb -hook, claw -hoopskirt, crinoline -horizontal bar, high bar -horse cart, horse-cart -hourglass -iPod -iron, smoothing iron -jack-o'-lantern -jean, blue jean, denim -jeep, landrover -jersey, T-shirt, tee shirt -jigsaw puzzle -jinrikisha, ricksha, rickshaw -joystick -kimono -knee pad -knot -lab coat, laboratory coat -ladle -lampshade, lamp shade -laptop, laptop computer -lawn mower, mower -lens cap, lens cover -letter opener, paper knife, paperknife -library -lifeboat -lighter, light, igniter, ignitor -limousine, limo -liner, ocean liner -lipstick, lip rouge -Loafer -lotion -loudspeaker, speaker, speaker unit, loudspeaker system, speaker system -loupe, jeweler's loupe -lumbermill, sawmill -magnetic compass -mailbag, postbag -mailbox, letter box -maillot -maillot, tank suit -manhole cover -maraca -marimba, xylophone -mask -matchstick -maypole -maze, labyrinth -measuring cup -medicine chest, medicine cabinet -megalith, megalithic structure -microphone, mike -microwave, microwave oven -military uniform -milk can -minibus -miniskirt, mini -minivan -missile -mitten -mixing bowl -mobile home, manufactured home -Model T -modem -monastery -monitor -moped -mortar -mortarboard -mosque -mosquito net -motor scooter, scooter -mountain bike, all-terrain bike, off-roader -mountain tent -mouse, computer mouse -mousetrap -moving van -muzzle -nail -neck brace 
-necklace -nipple -notebook, notebook computer -obelisk -oboe, hautboy, hautbois -ocarina, sweet potato -odometer, hodometer, mileometer, milometer -oil filter -organ, pipe organ -oscilloscope, scope, cathode-ray oscilloscope, CRO -overskirt -oxcart -oxygen mask -packet -paddle, boat paddle -paddlewheel, paddle wheel -padlock -paintbrush -pajama, pyjama, pj's, jammies -palace -panpipe, pandean pipe, syrinx -paper towel -parachute, chute -parallel bars, bars -park bench -parking meter -passenger car, coach, carriage -patio, terrace -pay-phone, pay-station -pedestal, plinth, footstall -pencil box, pencil case -pencil sharpener -perfume, essence -Petri dish -photocopier -pick, plectrum, plectron -pickelhaube -picket fence, paling -pickup, pickup truck -pier -piggy bank, penny bank -pill bottle -pillow -ping-pong ball -pinwheel -pirate, pirate ship -pitcher, ewer -plane, carpenter's plane, woodworking plane -planetarium -plastic bag -plate rack -plow, plough -plunger, plumber's helper -Polaroid camera, Polaroid Land camera -pole -police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria -poncho -pool table, billiard table, snooker table -pop bottle, soda bottle -pot, flowerpot -potter's wheel -power drill -prayer rug, prayer mat -printer -prison, prison house -projectile, missile -projector -puck, hockey puck -punching bag, punch bag, punching ball, punchball -purse -quill, quill pen -quilt, comforter, comfort, puff -racer, race car, racing car -racket, racquet -radiator -radio, wireless -radio telescope, radio reflector -rain barrel -recreational vehicle, RV, R.V. -reel -reflex camera -refrigerator, icebox -remote control, remote -restaurant, eating house, eating place, eatery -revolver, six-gun, six-shooter -rifle -rocking chair, rocker -rotisserie -rubber eraser, rubber, pencil eraser -rugby ball -rule, ruler -running shoe -safe -safety pin -saltshaker, salt shaker -sandal -sarong -sax, saxophone -scabbard -scale, weighing machine -school bus -schooner -scoreboard -screen, CRT screen -screw -screwdriver -seat belt, seatbelt -sewing machine -shield, buckler -shoe shop, shoe-shop, shoe store -shoji -shopping basket -shopping cart -shovel -shower cap -shower curtain -ski -ski mask -sleeping bag -slide rule, slipstick -sliding door -slot, one-armed bandit -snorkel -snowmobile -snowplow, snowplough -soap dispenser -soccer ball -sock -solar dish, solar collector, solar furnace -sombrero -soup bowl -space bar -space heater -space shuttle -spatula -speedboat -spider web, spider's web -spindle -sports car, sport car -spotlight, spot -stage -steam locomotive -steel arch bridge -steel drum -stethoscope -stole -stone wall -stopwatch, stop watch -stove -strainer -streetcar, tram, tramcar, trolley, trolley car -stretcher -studio couch, day bed -stupa, tope -submarine, pigboat, sub, U-boat -suit, suit of clothes -sundial -sunglass -sunglasses, dark glasses, shades -sunscreen, sunblock, sun blocker -suspension bridge -swab, swob, mop -sweatshirt -swimming trunks, bathing trunks -swing -switch, electric switch, electrical switch -syringe -table lamp -tank, army tank, armored combat vehicle, armoured combat vehicle -tape player -teapot -teddy, teddy bear -television, television system -tennis ball -thatch, thatched roof -theater curtain, theatre curtain -thimble -thresher, thrasher, threshing machine -throne -tile roof -toaster -tobacco shop, tobacconist shop, tobacconist -toilet seat -torch -totem pole -tow truck, tow car, wrecker -toyshop -tractor -trailer truck, tractor trailer, trucking 
rig, rig, articulated lorry, semi -tray -trench coat -tricycle, trike, velocipede -trimaran -tripod -triumphal arch -trolleybus, trolley coach, trackless trolley -trombone -tub, vat -turnstile -typewriter keyboard -umbrella -unicycle, monocycle -upright, upright piano -vacuum, vacuum cleaner -vase -vault -velvet -vending machine -vestment -viaduct -violin, fiddle -volleyball -waffle iron -wall clock -wallet, billfold, notecase, pocketbook -wardrobe, closet, press -warplane, military plane -washbasin, handbasin, washbowl, lavabo, wash-hand basin -washer, automatic washer, washing machine -water bottle -water jug -water tower -whiskey jug -whistle -wig -window screen -window shade -Windsor tie -wine bottle -wing -wok -wooden spoon -wool, woolen, woollen -worm fence, snake fence, snake-rail fence, Virginia fence -wreck -yawl -yurt -web site, website, internet site, site -comic book -crossword puzzle, crossword -street sign -traffic light, traffic signal, stoplight -book jacket, dust cover, dust jacket, dust wrapper -menu -plate -guacamole -consomme -hot pot, hotpot -trifle -ice cream, icecream -ice lolly, lolly, lollipop, popsicle -French loaf -bagel, beigel -pretzel -cheeseburger -hotdog, hot dog, red hot -mashed potato -head cabbage -broccoli -cauliflower -zucchini, courgette -spaghetti squash -acorn squash -butternut squash -cucumber, cuke -artichoke, globe artichoke -bell pepper -cardoon -mushroom -Granny Smith -strawberry -orange -lemon -fig -pineapple, ananas -banana -jackfruit, jak, jack -custard apple -pomegranate -hay -carbonara -chocolate sauce, chocolate syrup -dough -meat loaf, meatloaf -pizza, pizza pie -potpie -burrito -red wine -espresso -cup -eggnog -alp -bubble -cliff, drop, drop-off -coral reef -geyser -lakeside, lakeshore -promontory, headland, head, foreland -sandbar, sand bar -seashore, coast, seacoast, sea-coast -valley, vale -volcano -ballplayer, baseball player -groom, bridegroom -scuba diver -rapeseed -daisy -yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum -corn -acorn -hip, rose hip, rosehip -buckeye, horse chestnut, conker -coral fungus -agaric -gyromitra -stinkhorn, carrion fungus -earthstar -hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa -bolete -ear, spike, capitulum -toilet tissue, toilet paper, bathroom tissue'''.split("\n") diff --git a/examples/pretrained_cnn/data/laska.png b/examples/pretrained_cnn/data/laska.png deleted file mode 100644 index e0ed7a16daaa6359eea51cae76c43953f12329a7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 101910 [base85-encoded image data omitted]
zz0cA!)6=s7gBc8f!AcO|DncYh1;eyuS*G-)2u0Wq`9a}8*a|=R!4V3FEL%%hk|kO} zJ19~NnhcR376JnTn89Gyo_%_)uIgGct8%^fp2ZL6-ps5X7y-PXyKBkJbI*VIzTdaT z?K^k4@X#f8?(S0C)Zm;TDEu_2 z2B4ehb1AW${CJLM7q?Zhmp#Qiqw=_yKFoj2=*r?^Pwpjv|dK?TOvw z;xAj@9HBpz3#$Mz=e5oy)F(8Za|_qp{ZPZP!~*)iCJ|U?!a>A?x8WxD^L0c0J1+`= zLs@Ksvl~CF9u^l=i#x={-Jx7ams*s-Er#19Ng5KCyr-~cHbJ%(_oogV(nzF^DV?rD zJWun`1D8-JrpF`l>6AO$ceuN|hqaPRk334F)#i`>VtG zy2ZxQ63sNm`;>Apq}^>Z$#Wv+;*Htf_nwD${W+SYW!D7x>LD$#v-w9&b)QvXwQ{)X zKCE>WEx&qrvk>dH; zz!)D6q6h_`^I;|+iU|*#S>xd|>ol#OTD8ckk*;|;#?=YP)f@-kXSyz%;L zEG>2U{AWMM_rCu_uD<;~S*y#12QER9(4Q7WOUuNo8#FghldY|D{*i|{egA`qG$U;` z$y#lkROndFdf+5UiIiYGo`iTntud~~+DVGS;=I5DLTa3sAR^MNO%ye7G_azDj#{8o zbdr%~8Cr$Ic;nU)|NYnhXa4Cw{2x#eP>vZ5`Xp%t*x<$&^2F!1R(;iGd8O1$fN}4N zjH>hu4`dd?j6_9v_W=%Gt-!)(Lov&=r-p$ml+^Lpg{)7mL{|e9Ri&-e045#Dnet#R zN)Fd{Y0Ho9xE7Fu)H+ffZrG3_d!O;nQvln%KcQpzB9 zul#$0xgw`7focatEz}JTjCn7*h~=0ax@ulse~mf6xmsr|Pz>e@8y0R!QQfYUPua7; z>wN&+JcNU#~qDrYE_IyR@Qv%jI3BUPQewLs4)W>Lw z0EsHQNk|f=f?6 z#n1_kz2cEiewvL3ALioYk8}Q!N9k;AV0Dbp3Y{hxPayQ+XGzV)bV?GZ#7Po_B}!wx zN9h=3M4V)39T6odUTCC>u}&jpiquVnY9OM7w9y2D3noi6sT5qhvB$03M`Z1c)RsaGlZ$AsNzKh;i;V$Q{p(nm~ue`O$FmUc?cI*7xfREBuQ1Og+t)w`~xPuo0Xa& zJSX@7XMK=&GmYxKAHq8NEG$#!Nm^Q~3N&1OMt!aR@fA>bzOGP8p{m{C;_h#;M|PEJ zV$o!MQfn$slycTRU_3DHf0Zz^Y1d9^A#htKsa|@Z5;VEHf zUe0j(J>X1bS}hGRo+qz7#DnKIDTh6zcTDq~8y{Zh%HxmmiD#Z=@9sh6)f;m0fd`n3 zMttWx-{R+g{^!w2#_R9APxsVme)X?>jTc_{5hiK#cmC_Yhii1%pA;ddvEQe(jz+hG ziZ!D=r`2j=jAb$@m^hE{hDZo>8e>dBo|oV)Q5=I-l({8}6O>B88?r3LlqF?pkx`Q* zY2t(qJ4XRp3zUozKz}&ml~=FwN5B7v?7Z_L7tePY439%Pniq&XtQksI&fe=fp}X$a z!~3HCf7>Kc?VII71ceZb@={e9+`>S|nL5%!E@Y&Hi%!L9_Vu8%6EiG>k{V+Tv5sei zF(Ji*yFUCKQGG#o&yH-dzIM*xYdu~y<*P9qGn9u5hw!J_n}Psbe#8}h0?+U4NA`%$ zFK}zb$uc_h8Wo&LNNiyOmxP6KUCLQdNo9Yi>`KgfN?G4TLfoUZbthzdvneXBo;_Fp z4eJ=C)rp9>#U1J*4Q^KUFHC#q3CZ*96BbEx^*+T1x|~#s$1h*vH^26C+`rynIT4iO zDaXAd(yYPpaKvah!jy)hC|TcF=lvTWaCCUcU;UL|;qc&)+q=6Y%{HCYRsQ%-zQMs! 
z!I!@J%e;SQpSz=iY-t^%BCOEFS%WBziL;2fkzk!;G%2v9WilR<#xZdeCXB<;DDabn zuMjW^I*uxzrmQ0HG-c^fD#jy`GD1ZPDs_t#o>(h1p5-*5wY0(OZ(QTO7ykm`$0#Kz zJy=)eczbuEvYChQG81;pfM>JMIcMxKz*>t>iX`GNX3W3sb_VvY` zmr^S123?iIST&}Cxz5v+b^*d05c;y33`wb=?!UZ8OGzBXvmIuA&V;gGg z#WGIFP_Ij^W)`i`ch2odzW)AR1}sH25fnmIA4WfWvBJ$^ad=DxY>MLuS&`*D9w&S# zBdRBzuHLLjRSX>;rj}vw6Vj%_DyD)S%dmHJ-r%WM9MuO3kFAj8PJoIuTIwM4ujp;{ zBEQb`uvBuwDi%)6R#2gLw$&T#XE%9RJO>uva!@c35}GR ze3H}a^%)H(L{ZAl-2-0u!4FBZ7FRAm#(VF+N59|ahd=xQV8I$pQLue$j~~DCCMsR# z)P+k-03#&rr4^ijw3A>xl*Ut(jzmYqvBDb2vIrT4*CQ_seik+)t6jAsPGXF)Rm_1R zjuPTH#hMVst>d(6VI<8YA`;>AI(3qlUipykeDei%?p(taLxc}8g=LYWRTT$V3+-zI zdOa7Q{*2B;aAp0V!-q>m)Dkxf!q0p*V#W!qpwhldm=<+QRY9plSm9V}(7s}v&dW_v znOo!Vfz|?1mPj#wdN-O)%CZdPu~{Tz2s*_%l!RF8kab@N>Ud5JIfR=3f|lwaN;SJx zY9WBuy6Q2QB?pTI&!zE|!oi1+qZA>=xvY>7#u}VAGiQX7lERkr;R(K4EmQ-m7n_(Pd@%IP3`F2 zy2IVOJ0vRO)cPs5clJ3Nj5&YtK~^@;Vy)+c53hqQX{0gFJ^OLm-7dq?nBMK{TzlgV z&wlZ1G@~XruHE9)BlmH3YXvVf)<9nE=Z;iV5oiWxh&e^SIe;LwJ9}${?yVGD#3ncyFjbXs-JAQitkL z*Z4v|v*1W{6je>X2_dFR%*?OOJBslrd@!^|Djno-6*?a&ks|aUQVQbAkh~~Qt8p5O z%$vH$TGX(*I;*}qEFnJ2h1E!1&f$=v!ud;q@Crv*$9NfDm`r7qCTIhVcNKFcJh~(@ zXiw|XVj;qGHr%QDE~#Xd8Z|G$r4W==jGe5$sC5>l#B7yOkF)EqY|0SZF0}$1NN8nQ zA}hB_@V-@1o8rYi-J*@C z{`i(vD$^np$JXFsIoE`WN=_HNV=_?dq^2yYL5-HxL&a9GsNl5LRWekaX{akqhOhWC zr41?%ho>luD$YAZ+(|7FwuH*e3!yyp#?K<^ga`-KS6yQq#ewu~Oh7w2hpxowiz59x z>(MW~F4o#gBoQ>VN-0VY&RELQAcd$TW1*)lox?ayk|uQ9ie?(|-kYzWKKm%y(h8<; zFq4wWq+~pt&^zjJcdyU>!7(=AA2@U7484OrZhm-+#~ynOTNb?c-g{UF{r(Y?d>g%T zmU9n2Mw%vk;~Rg*b6nD5%rfIiaIO@&p)89aL`afqz!Z^BZMD-50*NG&xF`WH zDUAu^Je`COyH=FbDGEVuf&|T$hD1fx4%p2Tf};i-YwL(+g0NGHd`xL9#s^S|7LrIy zoD-FZUz~6XLDecaKTC#o-kzZU&7fbk2e+E9sC&!|i&)VUd;0nAEJVUBeseL?jT)p- z0l*Z6A6`^Lje0&Is-0l0bmd+M3sD_K6h%=XGlB=K9=N1wI=d+swV=_$hjGpb4PQUY zve`{!%Br!iJYdE+un|J3N)i?lh$qtmt)p;ou-G!NeT*xKBHV%@5>i%fgNmnG{a$s5 zCjKNbu0}fedWR|{T1h+-?}Jdn8W#e6w8C47lQEO2p)eDgvEfsn`z+@#T!hg+#c0C8 z;gH?C2ORG8iFJdbG~C_a=lZQ}HcxG_ytG7;#B6PCapU?8iaf_5_~DDMuyytV(_+jA z@4rWS=0P^r*Z9mAKE?6yh(_9A*w4f2t6J+c8W~E5Lsl3wW6^3IQ|T=j45lPWL>z0h z&`fhfqmc%EvUOE>k-&M05`stzLfXPy-WUnkzq8GYFTGm5j|D0UmNukE2@Td$59k;D zpkce7#g0|Qt5eYL*@7vA_$?iR!ob0j-MA0$MHu{i;Jb#VqW9Ds|$}OpXRekq1Fke zG-#bvfgzeG%4X5uT1kqcB#F}ylIp?RvMQ-%(7D#+0OQdzt~fdtK`4KWF{DX?AjVir zSrZDhMyb#To1zT2qmD4HoC&<^_B}jEs#tG)nCR9bd!?~>;wZvZ)Nj z;Hhm@N>b+4?dkk%SZ0l_7RL%)_#UP(WLZYL)y7N9=ILcV`P5}DUA{!_J+vy;L18WeGt0?Q? 
z->F%j9WJK;QLD0Fs){QZ*ysErG20``S>>}Rxs(f2{V;?K=cBIn?xkofDzS8U;+|Yh z)R5Ks;t_mU(@s^xm!`^;Sti=MuFF%pZxF!cvAq$)*K zI&$byy;GPnEK#$p!8o^gS@nUDM7W&kcu2R^!der|rltrthYa#N6-DzXZxwzw9ZAZ` z7#$_#r9o5*45YwYe?l7?r%BjR*$`GLt7+z(>Fp zrzPW2L8I9WP$%bET51qQLCYKK2qlB#!Fh1Ds-)_Ho($u*@LEAE1m{*7eBVuY<0r4s z>9!aj_k+*_8+NZQyr)VCgbF>oHRTL*8Qw=74%-6Dx0X~c3NdE!^~@n{v&rl|D~v@A zvzRLk!e?LT=RDr2=35g43Y7~dt z-%*z&ah=O3Y!8I%|@KHkBP4snwal!jtj(C_!@9rjpVS?7r- zo+6(b-gxsZKD>6F{euINDB|?i7Hg|3y!Y-qyzfzVOvw z=Go^z$Lm+G((mo^sZTt`PkwTZw41TI-e&jikT^{kPjcckMn@s#xtx~7ks{5)p~$CG zqBtRrY8hIX&^v3{-{Ym1US@oB2Vuu}@&Eu6 z3cNxKh46K+A82m!p1hUX22~4S>YK1;HZ6kC>H*GdJ+ern5GRA97I&93LxG$z$Yd1^ zTIIp(2=5$`k_erI9>oNqv{E7d%o~d}1*pLOkU|ngg3=kZib587b)%@ttgNf7#zpy< zR{~cWu)a$Cm1wQ7t_*5=Cvn0dy(f}Ep`%p96Hh$B{SQ3IJ0HBq*47$->$m>~k6(F& zh;*&-Q$fn-ei4sjrZSrhie~R+ zz*doRsv;vGbU=t$6M7;`$i&(vyu>6f2lYpub1FMWsaeB)c3x%dFzok-3<{zN!1 zPN8IkDIJYQiqaJprIIHK86Gf69Dz%5)}Z1j+M9)Wg${Ls;T647DSbG9!C-0ghOAt5}|^F+bK{$w~_GFm4~_iz6)Hqc#fw(@hq1g zeT+`JgqEHpkra~w=gw_~rT3}LAkL3kyPrvsE{E3}&_or@1LIyz+I+zREyVM{@jNu=~40v?H$F zJw~}ocp+wz#(K@*t6s5MN28TMIyK)oI2ZQMA$M@j9Y)S%bjF&6c1(o_aiyx7xp$NZ z`^Or)5s6R^)tM9Zwe9c0T>P|2&>|(fUeYi2fQ!)H#mVVB+rpCh5@QMq8HZaqi71UA zNg}|}&;_4*_8I=pZ~sl6`S??$Y0S#XG9HC>AbkwR;(SipX<>^AUf_pAnlh*|?0Co*pT3{ZJ$WDF+;Pw!vU4!tyFYx5Kl#SDc=5HX zxYD81CI-pzw4m8;qs0{ID?5#|NL!+)hCC`nm?`4QC|w3`p|3p^5-D5gDgq&7KUz?V z!hmxHN*AbDfRAt{aBPH+aRy=?0iIY!oLXJy?CLUK{rqS6^5>o*)g@6ZDXl{bg)^RV zlvDJFL>9KroME3`w(s0#d3l8_YvG;57|(03zQ&J#{1Y}dHaP6}xPAK$MPb=Gb(-g% zeunMcJG}GuyWDr-BCS;M2mklKV|ls5xvf<;m)4Q0#M%*0KKda02%HZx#Ih)9buy4K z-Ucse6vx%vA!vZJEM+pDG9DGgsix6PD#AoOcdAHFvzbz7)oK-o0i2LHYq|E}hrIIQ zk2u=hA(9128XtE zf82w&31kO1cYh>F_H}b2w}BIfD){*92`}BEHgYkAcu^>QBE+;7FIS^`TTN6$&V@l) z8?X^tGj%0ifr=EVQv9`F|26)*zyEjn^{@RRk3aGd%ggNmk##00E~@>aR08KpM#DbB z8yd|fI!!|fEeaw58jHYC=3}gxqP)T86P&F5j{<9QoXN3fO1qV@xzgqnAG^YD{Ms+` zx#vI4bTXlL*bACb5xjtWe6Fgbc$9nQq+C1!D5ig;)SW)-~n{(y#Zmd zuE2SV)&g1mnTWtzD6Ik_@DaEeNrLw=j+p6aN*cj0e&zH0+?PMksnaXyRtu*U<9y2a z=m;^M&=Q)4h_Hp_){R@_Wy!|+I$4&H#2G*N$t!&Sg&&|)gw`?J+uL-PmN~U~nohgR zn{U2Nkr#OHSn77@wi+Dl-ezNCnL%$4X>-z8(O6z#;9%{(3v^GP=4g-y@K+p$A_4B} zXCX1Oy`2h!DIuZJN(0-~ghEl(lE!M67)NU6zK&Fw-j~Mn+H3Ffr+@rS`geCx7;G`1 zG{uY_SXxsJ%zWtQt37a3X|!uOT&+j0^!IZuay{g9)h_y;*af}d-(3{-FFKlLYBYR> zuL13-u=>V?-CAu25n|Tc%|mAVj80Y$VnSf1Jh8S|46&*Comz!Ap(u*kv@kTy5vY_Y zg2EeAqDWK4l?NW+SAXFb`Sri@%WSPLp_B@Nt|1&F>_E#0ld6cM*&$X6CX-+ni#5tx^6`XEef&W_{oFrf zXRptn{@J&A;rl;il3Sea@RvXSF?+p3Y-zA0SX*;v3SvJAyFev_fO-;2^p;MxI9Cs7(tMiNg-!+grX=g9`3vE9RJh*_z(E`uRKq)t*S<-LQQH# zv(dr~#~j|;Wzy@>?R02l8Hc?d*RJievvZf>aDvnk&wTQeEG?~&CJjz)ZE^L@H~8>_ z8@&ATtE{c9v)t`+^VT)8BxW)mvcB46H0+Uh!|~xEolcA8$g{b*j<=p`*KW}p6s&HX zBFZ!fk1-`>Ip2qhDsfpZs9C)Cl*Xa7nDv&jS_v6zLi$r0qjY#(OB-%5V;mp8^B&i( zzK--`8nH&BndDQvEkpJrwpwd=yshPWm4IX>JP8M~o`72xhK-d|-#Y`|ePlp$@1gu@ zVDK7rZN0?{k4Ty=?@=dYX^WwrwfyR*0h?x1pjkueKk@?s$Vkmc_4kZ=ofkNtfb=vw zF%Mik#V4P7lrKH^44;1LNzR`;4@n1KmKbZ%jRwNm*_(5)zl%;Z%WEq^BU((c<%DuH zA|D_bk#RzKJmC1?fGmkMXuA;R!Bd zq)f>39O*oh@t86<9Q4PONM3*UeLlE-mpcaoaw~Y{m8)F6`gSm2M-9f~lC0Cjx+&Ai z02O5V$n~83qx`=B%zSv$E9W)6ajFzx&_(w`f%& z?0|>w-$IF!q8w3_QyNK%7*7#-35CHO59r^x!R>3;*}l6=XK9J`m387MCeIDWM+20K zX}7!V-96x2-~Kjtc6K+}YmdsDH${vs;YEL$X9Lne@O4j*pIrvlQJ~=DDx_ zDz<%=UesaZiI34jHb zki=sv%aq3BXrvk?;qJkNfBL`wF8}hM{A29VUC56^B{K6GLSc$IuSnL$YhUTI-AqzgZbl!vaUsTT2 z|6hGO*8isFgUk-Ew;%Z$v;Rp+UI@}=!hL7f`1N1>GJpHG{yJ-&2GUH4q7)ysu?|XW zvEGx$5!2}iZ*n?I9desfI*ak1PPY?Q59KsW&5ru8cNgC~rkqT0N)UBhs8)iVOesx? 
zZHC>Pijy$WEv8Jz$D~<=6$0H{4yFnxiIOz*r}>y-JVHB*wgp5nPDY5VO@AG^mSQrZfBQC#q9oH1)1v`?Y8VekC@G1PjB7V< z(C_!@_XlilZn4~5Vf*$j|Ni&?fH+Qr{LLC}+_=ePIHr+iIAb{2+hI65;?${CkdAlX z{DAXkw`i@ba{0N>FiH2AN`|$#qa*Je@2q`h;_-NoK#|! z3MA~@T*zNJE^2Z3y$1>B5l-Ob9K0rksEi5K-Vr+ku-$7VTr5rRwN}X)r9PY&p&OYO zDw*p>og=Es$Qf%z%pL5Dn0~Q9Y&)?xTp*b_?j2pS;duEUFq=EFMN)# z{p`>1nWr9Sb*U3v&ymE~V0cz4LN+o&6p;g)gY=j|4XWkz5rb<^+iI~Y$K8k(D;fpUQWm4!vV?` zG**`JDk48RLP&*{8fP3*w;76c3Y+m4-+zf0Uw#GS3YuBQ)vNDu*dIZ&$s2FHNnYe= zrCHxt=ds7Gu)e;*BM)EVnP(p7(!&pvHByxDC>(+Ud@$m=$pB+5ajR89r3>6~A2aA7 zCliL(u5o-kWH^~}_QL&`!g1U`=B+oc(rmSvnv!m(%WyDacV`c+QyzTqA*R!u?|t{X z91r?*+Fimp^=y-}#sSnjihyAEAmKTIQ4$nJr*4EzX+RSrpZZVllpbMraTMMPSxC5zO1Z zQc)=ckx)T+fID#z@4fx*El(GNdBM*@a75f{`O=;wZPy2H<|a3mD2ge|;-~)5zM7N; za22!g7g;UKRje_2P9#;>&8Q%{Mo1zhc>WU)@ms(8HGcNfpJlDtB5pK6MR=t!&LNm$ zN=ur?xM{(3GNhc2h=ilDu>o-tb9_WmmgM=Ad@=-A&`mQWj>s6yXpAx*l_XecQL8I> zXDA0ld@ChN8py1PAB~wz29%V@2->GNas57{;{j2lNxHlq5dT$QEF8G9WK!hBal~Xg zCZ9}LTUo{#!{~TOduaugbZ{!AEG?5fXFAO>#?$MK(Crn@oIgj_YBDWLj)p_lS2sdR zap;1ucqFPq0p@6Hu=$uc){OUdxq1C22S>+rJ4;-;aGuDNxY04A?QQIE$kbT2F5b`a zV956N9op?KX%zFuYj4o(b}?m1v)Q4UwR!#ZtK7Q1O`0|+i-MvUBalqTV~Xj7G}2tX z`Wi|KqC`_-X{SpJ`eROAydU3PC*67k(Ye6mpZOfeQNe?cJj9*-2~j3#cVmn#gD_kx zMx#lkJc{rx9LywESR;tF!sLc1(P#-$cyts5;gQw|>4+l1bYl6&H(%yo{qz5m7hiY* zIqjqKLv&=T&Uud3SM^OLg6+;PMC>i%FafLy6IWjm+mzAGUtMD1G0bz^bPK)_1 z_a;#<-i)*8KxczrYgX%4=)y2P3i~eO1_&oiKR%Py}rgHmmXy`P57I?_A`9;nNJV_k#b0Rv}682;6XjG?pl8G2T0%I5)bfW15XDGfFd8u)4oR%1-R>aqB+IL?p|OR*#-8rV zCc=BL9@&VH$2m$nPF>i-8^`I>4l&LtCwu5Dr#w{>~i%qc*byij(l0~IwJ-l;=e0`lS|JA?B z182^VN=dAvu*-2CCBmZjaQ6r`p3q7XM*9aGjK(xtP1a7ILawcXb(nsSLHPMDO2M!QKfOVM$Rj=Crb#uQBZW6J$Q@;oPsB0Aj`T1k{Ip(p^s+R_?U z!p>Xo;-zAFWrei6got7~o2O}SI{avWDROj{;);Ufom~)$?$R=DI>JkdPBUEZ2q`4x zXhIPUi8~#NVGofh;&vx6Qi>6=jKGS@{qOOK!tVAM9rWmA4O)$imP#?xoV&X_^bUp` z?(Ne|64tuQUzh_ul=GmGx76=K0U@lb2qiyVT^uxijqU-exo!Fv&+e_V}ax=m#&dzP!Sv`|o3S zug?qLeSvmoiOWwu!~3^)crc%mCYnaK!@b>~oyr4JkysnTqmeXX9Zn1(+)JH%md5T)J?Izx|uP!B;;09A{QKBvRr`fio7V zH8M_6tu~Yv`coJUDDw%2y#queVg0`Q5seI%mSA*D|E)I}+}))i1d$N90^&GB#|cHE zh?+@I`b3hloDvH`5=V>=`wS})al6^1G)34sI7hSHKq$??8CEWw!|8}}|A3+jfJF(K zD=V?-46aYiWs6K|aYTg-G)qyLgeJu)O}-RaVzd(QI`1^KX5ZQ=40KS}D!M zFc|h}wo-0gzk&CjZ~pmrxU+l6laD^c=GqqTzITmDVNg0^e=y%M(B`l^a>9jJmgmUVLV-?C%Z^-kUMl&IfHKq4t zvEbX^`b+-qZ~uLm9@A_Hwy(Vt`blT2e|OU4TN~v?RR!LA7#Ax=Ib-_T8U0NNf1)q^ zX?w(qC5WvLA_3P@lgR2gLPK>KE^;{QpqqPvPx%u)NEU&K8T3V(UO&E-iXti4mM%$D-5Ty;YcR-1cO8N)8 z!S8r?2i;y}{q$+5I4Q&l4ElKQ86O@I4F>qh7_rg;xk60A@9b>oeB8Eebw(sJsL`qGw(VB@7)>qb9UfHCSo~+x#+K?rfHJh|rOXPCQ;P{yJ z)eZXnK9h05{r6qsh424}ENye){C)g~|L}*btu*<{m!HMCl9yk49i*q<>$A1B#+64e zlSUaY{pc0?g9)2kr?|7X$3vGNM@z|%U;Gi9S3XXETJXRl4^a3KSr%hlNjWWOwi1fG z2xe=SleJ=|6FtvZh-cc+PHKArPuc$*Pj57ytgOZ><1XIxIaa z5o=W<2BczG;73Z&fx%K#SN%M7Y97HI%7Vr5uq3851zDO@${I`mV3)P*DL#GW5$;=G zM&%=X8R%@zl;A9r$q0XRL~bm_)Zixt!(u?3YVN=PBGbu~N1? 
z-R?4LTc@zraI~|-we4M$RIIMA(`a_Ws~$(Ve1c34B4Kd>I2efDVv$NTJVZ*%MRbqn6o!0SpkhVVNDu_oOOOI; zCgY&Y5klaEqAVR+g+w#sf)y^3GH`c=W0Ds{QNrDw1HSj2?{VYiHHZU!T&bY$k-mb1 z-LnT@#BA2L-6FL;#4;$HwF|q#xqT{#`~62~hKrY&b(NpC9^Tado`p4^ugLmJB6QaF zvReAriMop!_w{1F9vy}q^scR55R~oRJBQuly3vUb>&Q zmgq?0re&r6vBXJ?P#RF6V~XK~Tids>rsSx1!0};^bEme5Rm|{sNSb6Ut*>C6q418u zWW>_iTG$zCMH(gSy!|e55~1Um!^1sHnX|UM3@&7{FRgU(&eH4kSXo|XI+-F6tSqmP z=LH9cJ=QkYX|AndMj4X+|%OPW8dza&zHyIrqvUB4WNs_RB-v!3goZNWgw1r1d)iO@I!T;y>B<$>E?giV4e5`Dy#3Z&Y~S8x&>w(;;84=BnhKl;I6va-BHnl#8KCCz4=8#iz9 zz{8g*3WttTM#BlC!ZA=Sp8wi!G0wVN{_Lk27bT@HX>=RtSd-_5G>H&03`mSA7!7ke ztv1%#a1hF{UyOCoKF6^nj#XIWM1riL7!4i2_y7DE|Kxx9#~j{zmwa#=6$c}{DeL4k z^AW&yoqAonjqcqAE`qG3n46C4*uuq3ji2_PnLSFWnMF>^iYVx6;lfEyj#f%xOLqc% z6n^oOd4KiJFv#QThSybpHjlWj(dhLz35Rov(2n(P%6*$nE-a_4BoZMt`DBW67H^7R zx3yJ5a#2$Bhx88n^zQ6%FA8f~0)^bQXwOd#4hGvU>j-y|;r zUgOl}2Awp<;W@gy$E6Dwh*Hh=_AN?hX*QeeZ0~UW`gNMkCeC;w6&@(>zVkj?r?)tN z@nU$`OsA+ugJz^?G@DpckWX`rcTA6tXfG`hqsWg&*l8YokkV5|o-}Kr(iA2W{NWM# z!4dt#J}5!78k4{O0TGheOQaX9u5WVo;se<6gke$8ICYxNgO_m4jLDrlO!s<>_j~Le z9B`~7hIzr-nbVxV?|y~{M;r|X-2UG8D4l0&EG9R^Uh|2ko~GGs5GjZv&3o^@&GnnN zkRsuUC!XN>=fB8HKYocXfBEwmGhuk#qtj~h_SKs_cI7b+_V>vrIiuke=@h3=pW%(S zU*mX?v%0!PBWp9t9p@jv!eM_vv%L{+7i$7LHJ0RMNm&}AC@kxBq$>W54AM7SwM>;T zTSXFNApT)3QbXxI#i-!rA7AAg|M6SwT)U1hMkp;2uEZlyDz3tu+-wqh@=lybPW-fk zRVSG(&Pf&z+r78qy}?iSX2LihEQM9F`^;n^D?FtTQRd?)oi(X7-PqZ{{D`8MsVOT) zjjiNwwY<&F&=5*UifSrZEG+()x*aZD*y8cW@8c_<{TNSQ+~j-vC zL>$LF`sgF!wyU&~&WEySXM3A&x5IL`8zyb71~w|093D~Tmho`RN@tlYZQ;-uQ((#= zjcyB~2rI!11{gep;ef+~9@C={CbukiSGn}?qmU-#qwsUZt4pxFLNO_^Mm89~DaeSlxkR(s#+H`j<0Ed|xW?iBE(eEu3`avwZ=I*n=lRlzv$nBP8TSi75?{)+9+xSs0wL#IYtxRAuk8 zVc!@95xujX-}}G*fPe5`{X?XgfIA}66Qr1s7Y0X)k{TsU_^ej$iW7&(2h;Lw4_G-r zd^HR4w#Lecd4`4z5duHW3_lzCC`D;Xgj6UUhaIO4)A71@tc3+NPf3WZ<>#63oJ5n? 
zS&3CxOTE}!R2xXGFy5e@K>LuTXCsLrl(QO3yKDH3FMXE(?8{%`()tn&t%9Q==>!yp z2oomZz!p%}`+FQ8_L&R^EUk3eJhjdT*FI!88li<|eSL+^&1JGU2#Wg$19t8l z5+^CDp~>@{C!V-Mnnd6&y@LZxX*hdfi(=9Ptr?e=<&9G$%ggw~L+oIHFN#37izAY( ziFa_=Kj!^+Kg1R#-IW!VH`nNP8)z925|bjQv$=s-U4@Ck-M-6UJZ1lQgbSHRT)1$7 ztka+zj)K5jD8}O{Y1XFM?(*7eSNY~Q{{n~P!h;XdZnw$D6Q-jPSsbylyhN-58SUHO z{Q>WP@IJrwpZ+FUBV*^z9t)eyS^xkb07*naRK5O?|M;ihWHKJJvD)RzqmS~|+i$Ud zaKO_a{}}Io@IE(g-r~Ur?!y^FZYBMR=gM=Rr+MZAM>6J9U;PDicbTCztgWvS#eyWQ ztzp4bnz7TaWiutY2*cmcdI|lL71gtzsQk)HgRH!G%&(%r=Dnr_d;@4^`!&Gh;#N zKFiUl2SL6rdl&FGwG>Z8?XH(zolpwQ|56?L;%2?%EEA&ApsGlL_PB{fIf0Bd6IX(Q zG&Fs6RFjYU_D4#(TiQv9>39e_O#orMAJw18TFjr8=N$O)vrdBW z%e9C73@sH=5ru(l?7d%#u0pG}tV+A$Y(Dl+TTsROL;gcpJ(?;#{zyJalC+;}qDynY zr=-FaVgBv!3znvCK@S|l!HSgVBn3Y$I3WyGpJNJ~B4Sl;U-rFgm=tC&f1Td^67Usp z&o~*bPju%ss*UPrASC`2n6zIfkvgiP{qE`Y^|uP&jhSz9=DFG$E!#wk#gb9^WBG6x zItqFq{x_U@EH-x!=l%4PgMT}>yBk3_PaOw2?{-U>)B;w8*Du#@T-FzW+`X~!IRA;) zd?NYntrMD^*El-kyangPlWtsY$lCBfeyyWH%O3kTXz1E`T4hd%#eFNe@q($I0)tHr)E-GJot~pHsvZvBIQ^V*!7^n_naCDK}Nz58J3~+02+L5;Yd{ zKeUcN+D2`;#Z=Wa7BpofievcOJ7mjjjrTEOR8xo7GOFqG*MiG&9-M4g+61nG)bh(+ zS5#{17Um5K-HoNmShva&H6n#-A5<&U(f!B*xCgSw@gwNMO-)deZL+65qZ!J;hcT5~ zsU#K%j6QUtd-uyS8 zqH?)<1U#uksO`48w=7v5CGgy_#|&)Doxx2N{b$*4z!5BJMOl^1n=Fe;50kfG4|W0Xb8fq3-@o%*KVKz|J%hsAjv-kXEPi77Wq-f&Yl-BNvY)YW zB!fim@I zqggWup%HW4_gWe8N=Hm_qf&;2(Y+VYU!~F38Toyp;rXR~E00m# z^7z}2sgN0=sIMdEm)4&e^tPAoK6Kp9pz4Hvqa4JSNk0M0p8Mf_6C(~Cf;Cz#s8K5= zq$w2Nx{&dqpTW6yY)EF=xyTj55!g*+{VeJg|G~w!&?+03(Xj(LCE!L7>Wf9DrZhz~ zVz#ITjYOT6Kw5sQR;hw{I&@#O0vc)RxI14YSf8v9s^m03WTj-Wyg-&!EQOiO&N~R2 ztP^2V)nf$#EO^O04EFxDT&n4Q@dV;4JpcDuSd-qrlX{|^u*DN506RGXj*CpX3 zK=4C-i&B|Ju@;M}gBk%Wed2^iO;R#}Sn7$Ut?dEg{UhcS?N1S5jo+pl>keidjf2a} zCbHh3&|tJ{%DLVK_9+T+Z!?~8Om_w@v>L%Tb2h2Fp=#BBAuR_R6<*MpFdYkTK5 zqU1H0Lw|E-r>XgFTAVe8)tEf*(B^lJ6fAU223h7ESY49YZ0-j9rSGF~+bOEC4B@-| z&{&YUtsZK3Ohu~K@xA)&O*pSa*j-~fkaQlwpZ(1t-G7GoL=)IS?D8Z*mz^x&e7mV?VVhftaE5g{?OpQ zYQcl{gAdjKZC!Uv5cXN05aHNh4)c?vq zFawUPcz=6E$e^ISgS}~#p%9#<5VL$tS}}gPAF5c%LIrJBDw#F+d0@Fb$kO-{BDZJA zaN#=^I6lf(Z5c#Bp`p1fZ^`3Mb9(l4`b1H2STPhiHsGBpRS+peJ1fM`fVsI6B4@P! zFE*C^+S~O>+*KVhQzW#j8>JF#}y+T-_Xd6-0@bCQogl`g-9~ zMm2&tw0@5rUxMID3TG9ZF2n=YE>>Zqd&hM>Kp97Cf4RTOiymUa^`<%4xF@+{0Y9|o z;j~>k8sHg_-7U7AB0F9mQREy!3T8^rHBUQ6ZF%?P96E*5R4RS(t)a9P7iS|+xop91IBh<6s7;ulzlH9*drrC)QNTtRkwBII!XJ7iXEoZ*%$?+xd*-U{<~t7RD{SxPA? 
zYU)iv+-d}8ZZmpq?Kc0j*z!WR(qg`kxH&w9&Wt^F(sqs)*)k4NQTtSLq`B z0=ovWsJx$OKp2v<-&I%%gQ5N#^q}q~oy>-L#>>0tX~&_F5lk`VvOm1_!$}1NZ$o>z zQp^d_pvqLnB>xMfzLKKWPT(42DDoz<sdE#qx}*@#??%CXL7{`i8KJ zT`XdQika0S-?=J`$276}D0%EtQ*HUYLL6i*d(axCqfQw9lJ_wu8NBjre)?|^>qiwK z9*wPP)qE{mRQ8kQ5WLS3k~e#JadCC1*Xf`A8t=5A95Zn^c-ktwSQlh0U$kb?B(i%i zwBDm7BcexmaMFCW~&yG(8n0!ywwE zc72U^xBi?OFp>LQ5it6zGcI2-R=wJjLM*AfkY~=+U|*4{hhGt2rc@59P&cQ`p)ygI zc)60`Y7&opN3|i4aW2V*;1(66-DlHe9j^UeX8t=A#K9ysXvmy6DA_V9AV9EEy8#_( z5fjzWnn_fbq$`5<f8CZ;!P|MA%%?0LG@a--@C2O~i|{UzEw>^MA=JtH%K4$t z`I{R(!9Be*=85mb?4VNf)4>PaYuvPGOKd5koD-U}Yas`x%?lE^n&aFtLj`LpyP*YV z+S$glDOxW<~vM$1jxfh6z*0vt5lK-iZgtooQo6SvoM;#(tA|gbVObG~3sw#~alFW*u%t+0 z@umy%9eyQi5-+G1E-94M%WhFnlOtzmRI|zRX+&G+-5Yf?NBxEKbU~U`(bQ?%a2W=q zg?G++TcB=kHk~WXVr#z3K^sOG)f|-qs7Oxem zc$~ID{1+`gd!()9PVO#=?(G8sw|oKedNvntY96WK%mbY@4i0SaVA8~9ZX6nOlpK>g_(G#|hO-M-;doHHT z`a$)45vE_l5f{ypg?@x3gTJXK^S(II^u~vX%gJSIn2?i0M5`>n|+VUy~(_z%hxW1GA%$m`I7>om)<@a0V3$*uUiZ0Pr0y)hDb4OWzP0(fzys- zv(icwF(oLXpUPB>?EI)l>j>)ld{1U*aeo{mP54T6&eD@`+v6LPVg6iFh-{pg?xqQQ z{Xj8cIN$7zKpVmgIwxLbb-bRDvr4ASdo#PZytjz3v(=X-(N;79;yRRN@I`C&@)q8l zc|NdI)6&1Eti>m`%TyA1NJ@X>c)ud24f0^U-S3V2v!Yd^!&>*W&y`|VUpKQmI|()A z&iu}dc_kHO>7!JpZufVGl_B|aG$AZ+VuEeE>19o1d**#j;t03jWYJWmahl+_Ou7Vo zqJl$A23Wi+Ngk$}@bY~GNK`hFZ6zsM9qc0Jf3|mear(R;x9Hzv{M zH_U|lHl=mMZ!+ZUS+do0))BFSY74({z$a!Pnogp9hNbqggDN1l{3Rtt)!erG`d8yU zCTT1-eRWvz7g~4G-%L}u6&>J@4cs}9jxCg8uCeJDr;Mu=mG6Ys1PR=OYNj2&#>oM< z#|{_s?=`KKco|Y0A#v%5t$TvV^0yU~qG@z6pty>4E)2Tpji6@Q6p0buk6p@q$=kmb z?Q(?l;D{ZGeem}*M5!I!IVq(hXXiw1DgR~^{QC{-6 z!J#^d?jOKj+NNeC%|Epp^c=;D9_cqV!(R-Vy z*^=KfiR9X|Kwu~*iEXw}oKD#{`wbka+Xy;E_54L_4`_iw^^~xnk;5K~F}}0+A-Jym z((?hsAD@Rj_Q3r$bB1D(&fb1$;^t?wWSatoEZG&Gv;V~Bt>%E-$W7n?R?T1*aeVwg z*LrL1(u~N;q0;g|o;TCN{C!idl_ogSGuDQ!145lwLTU3P4C>MjbK&3+=a$eUbT~5- zh3i|>l|n)p9d{365VJs>+5&MfF;S?I7UlZOZ0@JyB{!zwG~@xl0IuiQ}$I)6@x+*UbUHel^(@qH-dG=t=D zW=}Zk#qp!@g>#N0^*g77MjDF~56~x_U_I!t!cw?ARoETh2TRU!km$7AFS<|Y6bqMU zNO+Qo%0<{r+MFMJC*MXe5YxY9!Hmo&3VqdRS5O*AcSeqV{|7aZ6I-Si9mbp`=!Vu? 
zSI_TCVMd`hY0OP&l7^<7hR+C$qPZru!TGFnf87G<7%_Pm6`{hf5W6AZL8`nreEpU&ks!uB6X1}CqdYPft5tDd ziQ~q#N;r*Jfs1{>02@_7V_5-MJjbPQ)HRxY_SkPu*7~RHie17uqd6-X=52_y=bK;7 z2qtrmYa84`r7D2vk-hGM!7&TfYQwf3JpVJxpKlaKLXf1}X>#^9z1U~(vfgXfOdVd~ zf90X(Il20?#2;d>BX2rIX@@kv3p@g$4UPSOL$JmYgLMjGvjX?L(;92DAOqHcmBRw* zJ_MqkVVOYUu)L(`BdA}*)(#ZQ`^pAlOcW?KFJQL8?e@#L1?MRIutlB) zr$-O=DUZ2A1}>kXSs6j#j}JcT>ad6(c36`x5s2^>o`Ut`O|Mso>0r>S**R@p-J->E zdYJaJStQ_H8H%uWcc{1RVIFA2k|{F+|5axGt0u=Q7UcyQfvXi2{v1 zRAD7Phost;XbQ7D zZ*&=4Ts=R*i^*FjTzO4j&c50d1zKxCmKP_FjECsTY(-Xg7T2JA6NVCS52kW<#dkRzjshwUNNJ(S7TKVCxi@2^0=vqk6t^1m|L?nYoj+SzOXvs-urpJa0T4$ ziR4|{h6UWbJil7oK$nzY))`RM*)NVW1wIkGmbQ9Gq`;+YxnY3Fehl<^o62VG|57*F~PbmMmt0p;e8p<`hm?eR}dnwQ^n(OZ%)3h$GtOop=fQMq?hWn>9`DP1y_1@-dyUkOv%agm8J>_<{CLr_o2qnWS{g3;uf{muJtFIU6VfBpQSvZueC%!3F#b7h1 z#EQd=cktCf)KF(gogD#1^NQ{+%b?g-1~BS3t)Fjfpq; z>md-(z@`2cr*4r}5%!k^q;d<+^a=ibs`J}vqNBhVbM2Nm?tVOA3}$GMjk)ZPiXJp= zU1blNMp0L1A&vUekBL|$=92~dT57SQoXccf+;GNc@L{q6{F_HDm*gop@Sc_P(gmHY zPEXX)I+$OoXlD?uXJwV$=ZcgI{S0|3sUx>*;N1aubj~sDROy2CPoO7=*H0I!F~`GU z*M(}FM|k8)Og}Y#1K~q8v-UOd#hf`~o~Jebq58}`Ib(O5asFewWvW_@k}WFTKjozg zFhb4FY&~-QJHzrQum_3UJ#GN^^(~6(O)Z2B71vweLw@j0l>8XfzmL*4UAuvA-%MbG za!P)D2nC}j`Lg<~1m{Ye*QWJZ4bKfLJzw@LV~lg&r!u1-8hl6sLuzzjUcEA_x-CqYqp&)E(x(ua*-G62v0tMrAe5&C)j= zqgl<@Q;V(BH`+Ab!U(Z@hfb2*6YNXm^7NP#RH#IP!9`l-^ceeY(vKIvgveW>h_v_Hp8W9oOI)8my4vQ!LX4RuP%uu%+F(PzIzs!K&cJJh+4YF#aoO)4p)}E2%fDj^l zhQPwP=Q^EYAt^Va@sWc;h%T;0CbjGW4iaZb(JqXdO)q3oL6XffOWi^AzKN~knkC)> zNemw|b7}kgiCynq-X#rXD>vDYtGr=BwdBsDQzKT8ZekH)FheEO=gB9 zlig$`sTW+%AaVS$6w!6psv0Qc7Fc$*Z2M`gBdN(XSei<)N#o7tPzd zTpwvG<_ZS^SMhUVnDU}uU6i?v?+sr^)SlqV{XbsmRp`t4PSPKNPWvnMB_L^8q( z#mo|P*{mGB3LCPrUDdf#&|}i#!Q&%!GAbq_4C*A?;0oLZi`!BAj{oBVBzwEyywyk_ z7dG)w#2=$4Q9D#nA%^gM7omY;ezr}90jKA_(pJT3&L6Vx%wST=>Uxu={2l`P(ae9| zv3Iz}=u}Cyd@fWg#}9qwL9@bi_GCJdSgGJh9X4zhjZbmb$Vcw@=Rk#(6c%E{ooS*& zI-egQM9br-Zy)emT3JngSuDj&Z_pf?%abk7r4Z%a4pe-{%^7p{UrG5RgpuJgl41q( zU2eNxf1>+$QmeTrW1NC0_9k-1kxYaBQ(Z`#u%Mzk;m&ROb^iD8PfCB#>$uPAAoY_10b^&Mja@h~BgngM)mZ4h{E~ z8osRj@=EmYoGeq#^u5b&@_czR{95QdckF_=TByZDHPhbh#RGCYciuOz5j&jJHC;x7 zWxZTq2mpcVSbeso9>g!Ux4^Hsp5-u8m$?|`0XA_l#ZezV=_-;U8IF?~Ln_l3C;rR4 zLkU-xXu$N$h!o1nf5RAvEm+LTT3*R8h~dmCQ%Fv4X;)CpEa4waTv&Mdb(!{sD#0@x zET*g?0ndkFI(1YL4{m>fgYYkqq6l`S*7sOb&A3R%XN!g|VG#(T!|*-iY;5La#)Fyu z+J91!(V^re)3x8Tvg$bYcu4zZNb0_`3@FEwb@+BA)tCg^W<-h3?d4aHzSCtK%#5aKtG#?U{a^J#*t1wd5_wLA@EJUHpj^gwxpOoZhYTg{>;=Pm~~ z58-M(jdtJ7g<5PXWy$C+)PFrWw>Jx<$Fc_)AL)#^(|>f-Mph3W z(H>m@=d2i=?6#(p+K#%3Y+VnpV6gl6sD8r2~0zQKmBI=8T4bmaf%zlpU&Li$5)$x%{m7nx z`>5Qr-qWYASMwjA=TBeOsZK;h0RhV=Mb$0lcO{FA11sN+%G$P>l=Iq?E@_Xtj}|n{Fjc%xkKYQDDHXD z#cBMpsaKEIJ+@H#>)e|oc%=zBT6;J7V0yuUpnrx%btI<=geFmf0)k}(&7L{FNqu?M zAd9LnTc*zXiS$>Q49mPtG6)8sPBQSmi3!}GZn`&Y){J$<@r)QnDj~oBvL?%hSXcWU zy9r{4i8Ic!Si{f$R@BrrG)R(%QXPr(dN2X-BY7yXJdwh}G#CdC)-_cdC1%5N$PxCH zpWRMWf^JUFGX1(A!RdIaav+c-0BpN}BM1S?s|KAIjf#*;v+Zx2j`B4Ndm}p)g)Grh zsz@T+k3~^d#rFmu7+?T2~ofCE9PqR=V%EeU)S`cUIM*2V5bbD4+);&1uyhax|v0F~fuaWMDn zb>|Jo?api3X<>-b@tEKZ^bTE8HokD|9M73ueUwc#*MY-j&Lo2=f;uycv2f6tRJ$0b z6P;_K#|{;AP@`pM<`l*<%teOA#1$)7E5M{Cpq#X>P}J`&P8VEUH0ZN~5O_7Z@?XMT zjS782JYXJ4U$_01A{y=Sq-kxp)AJH7vo)6Hy_S237xBz7PJDi}S3WCR2Q4S%BsY~A z6&EE=g6cPOZ7p{Kb@g-~b^uXN+-hZq7r&KA!75{6ZOt?Ni`Kgtzv(#S3oTG%HP$AL z1bw)qEub^wrr@EDOQA?qPWlk8u3ofg3jj_Ud0md+ZYO=ldJ5$PfxX`46NxvH8%a-27NP%UN!a{b(251xM}$u?;j<8W9t zhI#<&NfUvXqpPb#!SpuJ3GxqF4gC*4JaX>-;QJ)QjYlI~to9Rll$%}neBMS>>ie`_ zVoT0)VIzx#@8d}o)(s3CC_IAxW9i3(*y>tB{=Q{G(yyPYP+!_JG(r3w1%9+NbY9v-1E$=jMtQfkUxj7{*fx3+Bv=Ud@D~L7K`H} z0X36P3CcOL9KC?Phb`i>#2TJSVScWI{7Wo(@B)$Y!xrtQVsjr~b%0k69~4bVG71OZ#NRL-}Kbh7W4~m@auReTK2IE@s5nVNa=A 
zEyfAC7Q1C&t1ND(DYjOrP!j+QCdCrSaYN+?iGU=WsRQmKbCRLD4}eEZZyk`0V-BX| z%2uR^W`y`YqI?x=Nit^uVT2k3!shd!|9b0KG~ZKEpUl$YI#Put&y#Gc1^?(FA8Srd9%V@yt*jWVfd^)Q>7ZZ`#Y==U_JpRUm z(v>veJOh(RF&xemdGXvG2S51C9;U@(adB^N0qUnYhJNCiAD<9G6upv$bi6ZGBFLOQ zob0M#INxP7rf--*SXV>KyWAk{H|Q?IjNYDpBCU83Zy)20afZ1dJDUd9!*8*_j6*W_ zi(Z6GTB?K7&oD7}%OrjZuSXNb0tvA!OnlZMLKqc2Iwy7nGrB-S_2tdWz{2gaLr1qo z5n80^dPKNMw-u+!6fJ%Tms1vZZYywqg7=~1MKNHpYD55_qxKIc>RiQ|QQs34X5w&I zx;N0+#V}Dvfq7wufAj1tzT2qF7TVo-TPSdGvF`! zZ(`6woFk?^$2N__=%SfZMyx^=35$T7vO{?o9F&P3Mjs;;1cKEyNN&)hXr+V{TchMi z7p&2pK?V?9$L^VQXZ3!F6Q0nZS2dl_f;+My1F})=?ZU*`QwG*9j*c>bgy+Vub(IDV znl4??(ydah`t5c9kEJ}{vvZpT-tY3^4s|~Ll$`WjsY+5iq;a97`dF1Rq zr#^zmxq$F|?7wiOrMac|?62gm~5YhXE{Hu2` z#)zKimZVbw#IGNPqdq$>y^q7CjgLP-EY2es#$D&gxmApcit2GqZFNcCx@XCDIY%SY zEb^a|44ufyj4Yi`U3$J<+9z45k0{G-av*X!+@1`{`lX5i?YIdUCro0 zt(1OW?*1E~C=<3;6EVJ;SONA*^UfP&IOxIG88an2;?;k@9^=(2)bp8pob(?#0pPH{ z!zt3aB~YJ@lVTF1_v^W9bP^*#YGAwhEXQH_d@lB?5IxVGh9bp)7xQKUqzADI<-IebOn$|(`M<%U z;fMFGf_~RI$Cugbx47>Q506fdH~DdkRMLZjkOi~F$Mlqcws~|d4KRxM+#t)>f2v>H z9!3me-wT^*i~g410_W+mU(C$EhxpwS4?2A|188~S2;w%g%|V(BW5>M{eS?l`Ojx{% z{_=OXW5J5ONw^N`Qpx0I!A1xRYFieI6c}T)&XI~tU3tniBKSWAB4#+9qwxLHD}eKm zdp7wzEK=0e7L`BLR~=Q28LyfTW=tThXdz~SCCtdPrt*bwp#OAvrw#v(9#3L!Z7uta zGa}V-tAFk6w$g04?$!3c!s~y*KH`L4e~9FC=U~VeF4up>Mq4+qnXE02;TLxK&b|j; zkBI(NE_5VU^>N*AM-j!P+H4vv`9M@7$EB{}iy)tdCZ{;rq}ykxLT@(qDdCj_gW3(M zp{5p>0ynf!69{E~S>WM9O}@2uFkoJH+X1Ac_qge^CIdtlLndsoSX{ij{XLNqE#|Y# z^(S_*u0*Eg=uq(-h-Y{{GchKrf!7s@3T*4S_jPIAm@jYdji1WoTTF34aM68#FL>I1 zqpFc%{`)tt=j|E2GC6`Ds+0@eJbhYy$uKGF}`_VgSAa^k8F0e*gs)7}X+ zM1&5V0or^=P<4&@5T>sI4q}Fts+IrL)cA9VhmYPkkz2w?6(65}=*?@a!OeJbGC`zs zXos|5{^gh$@fW z*tF3>9vBsg>4&-5uH&O#S=bkCh1m8_Do7Shh5?Z#>?Me8WCrX@pWWL3sOeF}xc<2w z92tJ+jcq6w3sgfDR>7w!%#bLQPAi^t2-qQce)i}bCz~`JfSaM46eqIyWJCQ?T8+7> zl@4$ROCuw#wPeuQ#z7G#Xix0u-LB9QgPO!*Gh32{-b+A$Jv$&(7i>=60t0u?ac?!1 z%K$gw=S}FmYzP>2{iaNf6-5*%(nWgJpV`$H7Z=TWbB!_>frhVA*0r`q(`pa+rij9e zk4|YXq6{CACh-J(uGwPT-@h5C{D}^Ud$RNarjM)^aqji|*I77yTUV;sKn1gi=I21*PWKh2Y*qXJ7X-LG0f7ITNjEIwK&x zds-FW0vTWaBdyE7cz(^N>=t8DG11)(6_fq*=g)UFw#)NDTGbjgZSoDf<*^>?-f)lt z%ferYA!dzNZ7SxMOKuY#Pb3TqB~!M>8kC;Ofo#^LVqz)7&MIou%Z=!7Y1P%a)itY| zTf8p){Lk7&Hh(kao^4z+dG->N=O;*svX`d7Lg?PK^$o#?>ha?~>>7r@Ks#{aJ-_WY ze)lQi_)ba|##G|Vk>CC9paq{^wG1X#(6@(_>yMiL*iU*Lj4%A!fQ3bSTbIi6q7hMk zvjcsn7|y^6!V!s0P>&c_egDf3A;?knlVx8}VxylRu`Ei)+*aIl*SlFXUVt5hG0Hn_ zhJzZR%|3dXun*DuI1j$} z!zET31AqTgwi^Vr%u-^9QqlZ#)W=g$b=sx*mg_~k?t3W*@o5}=IPDyC1X}>yg7y2V z^*;8Lcyo$LtaGGXfaoao1nup{pL8lytN3+(@y4ok;(^cM0&qc_wrTjE*Lh^+7C)|T z39l1Xm~(Bo5RHqcqs)e7Ek>0_CJ5FXa0*SfkBizfMEVW{oE!kWRp{>TDuCYn9%woj zONV@=nxnptQHJq-7+($#0b$l7bN=V*vmy>Zj09QVvrXwE;2gnEsZyia;=IEE?3?+E zrFDSK&&p-<%?7lgRUuESu)PkrQrT7l$0qLehi<$U?-Qz^&2awMvW3j(FyxUPvETFu z+CP*k%PVuCLCR$_P!Cdo5KV?zu1qI7u_ldyq8wa2w8+Z#+W1XIN?H1xt@LBJ5AB)A z3*seVVv`0{?(Ny`P-`%Vd!Z9#1_z6Gp$pIs^qq4JIkxKUbF4MVuE8-AoK(p$vxwYT zoV`lEn$-pl@sdn_ER?$s(V_w9kKy^A6dm_lf;*aCj6AXf=iNA?-HBRS9mL{FWQO}l zgPy+eL`+;_>iD9Oti+H_6JpfUUrsqM&!^A8q2CqbqcZHR=sxN{Z}9E;>fR=dLL?V#%ItA?@Vow}66? 
z0vip*_*08Y@w_eK0V*x6A4t~){(y@Ym!?3?HafuR`kU`JdM_JA#qyy=P7{COyv>Y# z&1be$ia193T^;~cim?cy!=|`?SVYnqS(8XyOyZ3F>?WMLV>fl+I<-BfGAke`w&2>D zjSEmQ0ecO6ltq1a?sPz9S7Y`tXOR0;fa?Rqu~?5Kzvob{N-CW%#~)nr+!1VoK13Hh zA!m#OG}#&3nzLNsWC6E0TOwOC@~|YE1o*6MF9q5hJh=+t47tOlN)_{cobu0qEl*XL zHTVM#2%An;%ug4gU%q4}jk-1FLPSXeHywXa`D&;1FWs{$lXGXuJH7oZOHv(hE?24F zEaT-y(l(txr6*3#2&e#ZF@p;U`?(lkKiIOFo$5glbx93SAy~EER>XVk^kwbzTsq*4 zBVh(zI}!$uAG(PrxI0mta4Syt5mC(l)nL{F#TUE(qC0rRX@Q(emU=#T6I(V!tz_K_ z@{0nObQV$QpLmi$F>pm#5{1?nJFTqzpO?}vMSyubEbHaXJD+by44EGo`Xg+eVay=o z;?TGq^i^Qm&r@fQZ*_U>{;%PeB0O-<@)*{g0-F&KnnT zEaDED#-GGW$!I@%E;ZH`EU!kO0kwJ4OFPr?@HaqGK^&#m3qZ5=oVbhDX}9fd(%U>v zOfyKoe0lS8S-Wz%KpO9VQ+K8UFQ1ar^@_C_f%D@w#l zfoKXLY&7xMSi!MfT*Idkp}TC>-FZy|V~10UUgK0Hkq*hC&t{dej(bp#lXc76S-j@! zr7v@o@39AY021_%%WVwWUoY&PcbNgmEEoxA)5*cI!(aRM+Has6<9uuRTICn*r!W7B zg!8w#We}(Pc$^}_URm<-rB4>iTM}i&2=>FrZBrtUKyv7GFy()*kU-sep$G~&p5M%s@zNbX zs4zn~9&64FHakU)b#eQ(@^qaTWhSjGKv6s(barGylqtDm_4MS2CXT(^tq1P+6|X1> zVsO6~uo8tp)*o_i{BqWVr1r{t{8RS(uKX1=!g>tZZV%#KQk49!fyZ#Dp4=8$)IgC; zlBBz8-lBmKg21sLzf)a=2_g ztbPi9A04epVrXPn*spVonV@jN(8tTW=*-MF*Vorxm*aek>q0$s zY|#5#rhgCOHVuN-8B_Ki&8^Lh5tci%s@6-#xKC?e6`Mu@)KcVm-}I5QhgaSvXK?XO zVAVdga>z+I&c9Ph*$Vn_grzgi`0x_l zx>unUOB_%|baBa5Bp_t4XTJ|cs$dj=oro`PIv)jSpKedn)?-=n(HC4k3%=vv{yD{r zqLLIQPPy|_O*8HJf-dE_ENMsI-VeQL{9HwrxjcC5Z|1sji*s>@RR?Dxyvli9`2uA(d!`_E-L4PoG{)0^&QbmrZ)bdVsjC8@z2jVNz?-5Ir0j$TlOZV$v-gl9PFZVouucWS!$G8-L4wI%>BJ&DIT4TO?tfIO3*Vf(T-dyH~ zwYw#uqob2rJ+@}*IDtRQmMwY!2G0DGA)G@Xpa z)5Q3G;NnrEb1TZP^IHn#EG`;_(b5BkUN<|+7|LUdAUu&rQrdt=RsZWm+RK4ydc<_v z`rSkmr$VCI`y-vG16CvM?DO4v%Rc2U8?Lipr!Z%E)7dRRUf&M+kD#9Nh>cgRt9x@pr~TO=}ZNku0Km_q}P~Lbb2HdYb~>xOY|+m4>(wo^7+W8O8Hu-l`5F>YY7}UN}AG&hkYBne#+B8eS_@;07|v5h*85u zx?%}78ge1Ao}L~Z3YgOEzMTf|+P#H({o=p@Y6;D~9H_VS1rE$&U&hUN0Uk2GG}pLy z(7ZE@STa~SJOxPv8fm>%OLcU5In0#HNfU?B1L$L+h~@W7$7lh! z(gA%}^T7Z)P?5CHONtm3A^>H-+=~ia^pzdI975n{Cb`$csHCjGRRE}M7JBgeZsPQr zMw3qy^76>cLae^)n30gFcj!%N{`;29JKTcbbR=fk` zjFB#E%O+D_60t-dHV7R24~5d&q#uINhiB7jW%(r zcb-=|A^pTM`kc9Ax94&GeF3*OFFBphQJsAWNNNUR>Cw&(&Ag8`ALOWQ5>hM>3ld9* zMNbaM@)V)U#m;+onmXTQ>A&XFNy`>cuI>A@i7Kz^8^*m^#|`)Pt8&Wy)|*l;z2e|x zo*d9Xv%%lcsBzEEdUr)+PmbocuoYAzvUAgUiR*Ccc6%Qry!J<=i}M5NGbS>YvjTA+ z%4mG%G_YL;#k<}M7tZW->r{p#$g5C_?Na9zDZy-HlP(xyw6kmtMaIor_q!ROajTy? zU|oe7Fp~n$qxz?`B+;Vo5zDDBNsXC$^-WD-hu&irJW#*`!eSORMk>mnMl(Hu1dkSK z#835Ic||WyCY4?X3*aD*s>6=$8|5PY?7K@SDwZrtX@pKsio9e=)x+v%cQ?M1bNxQ3Z(nK`|;1B(068)wdWW@?846t|FdTE--CJD;TeznH-nze$WRSP%27l{_=GewA3S93UihzeRo z#w4V0FzQPMGO~P&}uh6399^_i~O=lBW7B) z*xv@qcKg=_Ya6TU56DNG&?;#qulvnYxJ;&0n?fEB_V;;PhF}KVR5R;v>kQYnc6dt6 zr%bfcEQV39t4GwAll5oa=TXBKoK#CDV(Un2S?A=>rP!|DII+7vkr~Nw4)Ho7?C5W< z#l0SSb2Y;pVu(m;sNoOYAIn?gswG#|`&mcG9krbl7lxu6MqlK@V;3gI^^f>#(vuB| zGE{c4tS?BULAqva@#MvIIIW9ap$i0Sth1)AYj5$|#INP0N_0F`2X_=kfTcWiol~4W=)oA-FUiL+u4aq4K^N`iKB?I)H8_T_q`2g zQd-~G?^V(niyI zOe9PNjANC^*ABlFY{6WD$ZL-i9#+T4ri;8EYZP-V{-lH;7PUgVz@Nx~#7fySQpoOA->!4Gz{1IC=7sGAF_<#2Bg5))M=OR5Nw8yg{SQ~D=liaCdii2<{B-?yeIcFu{Vm2e$;m z>F-qiSKLrVP0`cc?~-TP!uG5-?ft$m8gZ}%Z7qx`-dU*3-u$p#6p}W8l>iIz-#?qY zM+c9IzFn^i5sM6QoR>#Rfu+~NaABb@Vdpo@;@B;QZ~)wD&V>}+>Q@FifWw9{3&C!| z4w5C0%IL~Uyv!amivba=CXJu`i4sydee#15pE7SBYZgA6#l7?*%s4ta%e{l@Sn4S? 
zZtuIihX}FiDt!x~ilMydiRNM|*P2Lyt;OM9H(Nb-7wo2%U&Neqzyn$dI(Mt@K12=JZE4bH#((thdwbLjdWkQ(xgI8OG!~lV;qsJZ!$ZbPmSH;LMAnNV zixL#<^SKG*V_uP{&75C+()%vSv19Mt51?_)`17>&ZcW#DoLXmJd?w~pifZ#PqT$0e zx}5b4w>QlBSp;Qe>CDm;zIhM)1#mdmd3`r%LD5w|bhBi8w~Ezj%FEX=*DF=f!M7G7f0t+t3;V+Q=yY)Q20HGI2gqSHzL>;W8uUId zd95Qp$vI&*>z$kf!;Cd=hIxtcXWS^=nP11>-=#c+Lwpl2)YFj4SCy+ zgWJ8_7jt!$_J|=iC8EZtbH8#>Qbrh`f_lwY6FeDyCN|_U4)ZLfe6FPIS!+BAx7VEy zI^T3Qwp2h|gk1WLw|%Ap1*L7)+(xGBG^7Fh=a#NxtU>=iBZFL)}UMi)B75=SaKO5BF;;N*r71~k00gW>NS7~UCl)r&VO z((Xh$>BY_%laXdM7fH_rW>VlV&dLbKU6)2oe_=Ra4Z8jWG%vPkb-(+(tT|^VI-%>E z=^YL=Ip8p22QQh|l#107LHgErHw_fO3TRsDT`n(uwKtl%WGDnRGp8EtKSa`z(kNuU zH!`Ys8h6&j#G&+|RQWloY1Zp8j7$_{IDZbIUdGXATB6w=2W@PIb6evR=ozz(FMoCt z3iVC0!oGNn>dKmZzRiu)l;lHNchDqbcl<1RjW~iCxVy>&HZ#q!uI8=xr2@q*;`h+W zZZ0s4br?B0$>bXqK(C&veA_wXWEHs5%F5soc3xl8#sj2FxLU{4#u@;h6v*(W@ z)W`NGuXg@^U=?{shIC9XhN%WUAog#C(5e;|!yJk$X@Xnvx!0*A}=m&x(dcTKrjUo zOH#M78eiI#pi*DF`8|y{OE&8*2>5&7cnY!a1tpa#WwH;-KZ~RMyy4s}@U|7eLmC0J_r{ciQn+#YE7`F7bLhRZl zkmT!)D}P{ua(y1)`u~L_i~ONCOLGZ$&0aGPWx?*aG}Yq?A5IAmmxK!^R+*nqk!Z^gFU-; zJz};opHs3%2`7^k{Pl@6oq1e^zj4iX7cfsn?wnr@C}!jQ)F&Lw`@(ZJmC@48KU2+5pq^q|yS zXWJ#~&P@C)NcJk=dnLHLFWhop*BgA$Y3^W|?!B*`?XUmXc=~Wu_?I!!v*RaE+>zo8 zL4T$c^_AgU6y4g$xpyo0F4*_b^*tmHY~?g_|3~#sNcLDOnFU3I+9$|QrPo)=*=8!e zvyj|fK0~@(!GcmQJ(~{UL#Si#m}?XZMHnB|5^RzIr7{O@JYu2Fq!-nLU+ZE@E->L( zZ%m0SQJ#Xcv1-HBbNJ||RJnfG-xwUC)9A`c!cMUH!>-fSA$p0y^sq5ykqUw%-?KMe z#;XRKUs%+N6hL!_e`);9x^{ML4Zm=ood{WBGAIq`vp_C=zCR7VllnewKKb0|97KAu{yfX5S*h z3>e26oTy#l;}fDn4a^lJj(Q_0yFpnwKISrv7jGO*-O4PEmpHhp`e*1t|l?e^sVeI z(7Kkoe;2)R-GU?%x#yFBa{8Lxa8Rm67p{g_)_7M36|!|#qCk{cnYtEB`4AV=9@8Wm zMVMuhSQb(V>08WClOI+5BaDqf#%P+w z@LS)Yo4cyc?^oiAn!_dT3j7U;Q2-=!5X4&xFK2Ak01YC)C2) zH_Ysp3Bl3~ccLCb7+ZyU=ZVMP)hJ0^+IxG)+wWvh`VKSmF4R!HAZ0vSIpm3dnCmSV zS@Juy2RS1x_T@PYt-N7P4p9NM8AcLjT_am`Jrn>p}?(s zw?~D3Zx&z%YRoL#{&3(yEXzLiP{F1tXRu)-%DN9(iW@D1KQX*Htt={OGY@u7j=$q{ zSU(DWO%G05j9Xtoxm1a+g|P3R!4t@kPbLG^)87A|IV2(?X6xxWIJ-@wQ({972h_g8 zV*pbE^r+I{{QdnCoF=7Q6OMH+P=J<$T4)D{lgtX0`c$XrD@~!Jd2v^FkEKAt^+cXC z5EFo|;}`uq;=Mrjo^l5k!uV~6J$k5gV9`j0C=w{_9wL}ndlRI0I18C|Whl?Up*DJB zAQg1!OstQ{nxzre$EL~a74s^Cvi^Kj8vNn?fg4_gMhiO_aZK_}$WvdV)KDdI2|4mK za>p5;In5P|=>caMdK6}4Ex&3ZZ?jH0O8}lP;U9ygA z3H;jQ;ZTRhxNe`CNTjmuIh_|)-pNq1>UOj}_>^fhlvNy3-@At!E$d=Z;_i)br6Uj3 z-A6ny3bDvp(tp@iAZr_I6@r^(L4|9;UDY@JFwzVFAJ=cP;~|7xH0?5kqaLT1T-`k+ z&DpFF`a&IRRi@!;en>01Qwh=YZ$ymhEk=YEUAz<3u`r()!Nn0y%FdWtLft8dBum-dV1aSUGhA zD1}~mlua*^yrw4!IZ7CLT^t`1%E2FWoBakKjhzLL2jeW# z=3Cz`yJ(^^;bM#d zF3;A;%jKqgfh2?dyJJ?p7jm!}w2F5KeTt%T({cZo_Vf=WEUFz=u#!{dPdiT%Qqo0- zj%MQUg^R4bR>C7F{zwY8bP`C94?KJMkj;nS`{y9rx@_1-=zmK7x)&a*cl8S32oZ-3 z9k;3mYr&SwIgqO*h3|Labqt%@z(csI;tBL}Ie>$J=WX26R&cU@>#&c;NL(u)*>#{p ziMs3ewQ2g$tJcmz#oDE#C5UELJWh19tcRemalgRsjtl9^T~}Wp()34`K9&kpE-Gp@U3TITry66DYNu|L!y_S@d7Fb}z)Er+B&Q?kLas z=Ij7Ro~RXi*y3giXmS>Qf67_0y*an8vl@rPJ$aoB5Z=zb2B$_`7Zs zq;sZSgxZB>tfPyeky>5!cZ?&dzX{d=<3n&J6`yS$;% zF_Gs$^07rhm?cx<&`9lL58d{$K;zBB>6tYSg{3bQ4q?ODQJ}V#X6AB=GS$WrxrVZn zve7>jNrX0rsl~Q2T0wAXfqs7}a5G3RN=ZvM9$z~(Nd5U?r zUj;nc-1Rv6oF~A9TwZ>WiHn=p%xE+k$m`c?ha{f#>z_LltVV6*KcCuC0BxlNMsSpz zwz5*UBb3doIX_*7ztDg-4r|N*9nkX|s>prsCo~D#3A$&hdC!<$se>@t?}ZG-^f0NBoMz$CwqX*Ap+oQ+j&X#m28 z@|u}U8fja+eR_Ma`zh$*wDQ6`PY$I%S5}wqmJ7TIS`?(49nqd0bx}sj3{zX>#;V3n z0+}JmXH6b-Po$zgXr~ADLiW;CpZ}V?W&kg%2ZR6CS`5vocDhibwCM^mt&Oz&knSi3 z^HPxdaYSlr#Ssm<;{QgAJ1X*5^wHYXMsOa+m909lc50(%qCheZ#eZ5E>-eTnpD>$0 z3&($baEuuE+^<*bxU8l3^@a4X=Wq^zd8~&*d*f>fdxTk#vvB|S77h#ZSt@45E}75L zt%v5Z8BvJBE2AT0Tn``Gi?eLv<57<32@CmVVS z1kf(i@kCI%;0(Z~^P9E!amAjal&wO;mo#eLymNaS#+T&tyz@c*0PC5&^)#O?LClt= 
zk;T=X1Z=hFz-7yI29(7b8UQtrsC@X+L*r7A76IAI%L_1(nQ-0DW(DrXKdv_`HYA(f z00Mq?l5DokSdCatns%dNCzM=r*C0}Zj%beIp)M3fm5*eyg9)a>&n{dQ`P#SI>o+aTK1=}JE@yEBd&ZRs9B0Mjvp|&qi|}q zTkeis_g?r*=4>?Hb?T2W)mziq-O&FhrU4Ct z?aP)f*cN5e_;ZbQsTk?`S=^z?5b*1^F= zT(bz!FqyJpw{AM9hcecB^BOT~jtWh_u}RBduvnS#@t46@>i(c)S+vJ$-n{R}Sh(@{ zWc)&_HasS2<%rHA{&6c+uEIUwDGqD(sROUCB2^VTUOd2RaIzst2|8lZfQK9fu#C&p z0V@U=MayB|4sHB(jOQ1H?rS+j0F)jJ#S6bs9|fh$+w64Fgo{V-NBawj2$fs)>h|=}%7I#R zGg6%af-v7KLRbn}Ld+fvIhEPhun?n33T5*aJ_>nrrcpA;;_YvBW+nT=XmTp~{FDhq zx>-v%#VjkJtcN9i7!7Sv7P#aA<+}K~0{Sze0(}w*5$3bV-DyT^D^_2wV&&2i)FgPq zl4Fl>+o#B@HdX_cw$YfaNCgiSUdZ<~f-Xt#MI;&xRUAo0#W<}3vE)iekUbQ%#rTz0+?0~>axy*U@fjr^ot zQ70HT>VRQ?IxjhAuF?=_$_;iZ7Frn^=ciNAHyi`4Vg>so)a01`n0N@IU4tyZy3{Fp zkK$8k`i@zvcSw^-Ka?p8tW|&>qluX`(ZLwelgTF}B-cBa-nPGioTdKH-U1C92R%8- z6dQIhOkBQDobIfRR4qSC-1vPKBtAZE@3`?9GWd`2AK+f*1Me-GewYmofh%Soq3!740B62PR2CHO)G0Z*Evqo> z<1uT=VeRb=FR2jp08zE&v47q29#q7B{%(ymVj5|NP$=RXw(kpQp2Z=h|Ax({_k-|2@PlseZBZ;ztB(N+ri z{lPZ-l`zqn0M=}wVyWITI6;>7M1R;Evgi%$4Tsq-Ve2?_IY;}`;Ru2vR`oJe@?dEN zUa@Un5;NxdEt$mCgD%15&crPbFIx`c83Ua|D+a0ik@{$nzrh_}{l80W(tGnrR#E|h zFq=D41Grq=yf11B`;C|l&Z48WAc5^SxOVMmDhRoLaVCv2XG`6E1Z+?Umr@H8Eu#kV zsFa*~(k*lRZc1Hq zp`P((;!-y7%ISjb6Nu{^Xuv?eu-fw25fPL8xbox&4j>CRl7%9H;cVA>%wL}4`Fs5Z zeL6-R*v)#btGROl@$!|J+gQ$oo;}mHUnN8y?-C zO`3bj0DC8w-Kn-ts6aMx;ORXjS|?+NtRB)2B=PjN6r* zL@sP6X-r3b0XK16u?qXQK2nwI>;GlKzX5T;bfNi|XB*K8ce@MJ-ny`uyi;tmoHjcW zalDMJ5C-{a`=6`pq=5ILQoAzZW<;T-MtEQS(7Sp`qo19fSxW;+i*qKxjQrr68^Y2g zHJzBldKBS0Vvc|P<^9z8nfHCA$j#*yu5peF^4W)h#bzZoI$?JxS(5VN>CuS;*`TA7)~;z37NK3?yc z8l)Mw|7tpzW9C@aX5hI6Lz9?{7b-u zgMif`>nic1IK4F#QGACoBJ{8e#&yj6@EZnPhkR;=${Q?%J`Y23-VE)OU zNGoX`x!h@VWcm#;Z^a3wE4facvbmYPkj;MKwnf0g4573AX+NeeMI>z%(80t#6okE- z4d+|J7BR*{mBk$TvC}%eHf-56`j1QW(e8);2_{|PR_l2B7TU$e^+=^dw5sMKOJOgT z08H)hi|9Lk(fg6+*(GZ5zc0blFAp)$JNvo$M!kmDnjTxJ%=#ANr0s5EkBYVdv{wI&ZyWYPhfM5_N$HLVn%`)eIssRB2OAg`&YMXEo9xI zzew`<{PzBcrdx-bU(jd1dX-Mgu3=j`bUc1{TrQuK!o#XM?|Z+z{+j-NYGTT(ulqpL z?gbRo_`LL9j3!IyI2`EN7cNlzV@h}aTqG=vQ{S!QgD)wR^Y|bOBST&ei$ddGOn#fF zS)ep2_F3ysAcF7Tfan5MoP}Ldyf$@`%j$U+DIa90er0v-oI*HyZkXg@H{?%~>iZ75 zV}{58g))Xj>LEQSqx6oW2`-bfr)TIZtmow6zH0C&AmigV&av_f500?`aS&$j7;M{2 z2M0xoxL#t?kMzjjJ(;|xC{My@OoZ27{+ z`A}11I4F^5P>2=afhao6i+@g}M45o{gEBJt;~oTbR%jz!r*6tBeYlek?Lv3;H@@3! 
zBuxN^7))4<&Q8wUM1p>=M9X$loJpoHA`R;!?RI#H0`gsZw-@<677O+rbTHT&9IJw3 z?snhb{u7Gc`kgZFHf`5FB-R{fKEhVAcEu+2>lp3*d2qk+-=D^pJ?OkoS7-7J)>+3` zo3e^Z1HS;@NIPL&S_ay{i^#0(0sukF-R(kaKIqmFGQ8s)%4?p*~-9kO?1oQ#?OxV(8iWSnDZsOEJfQq^$oh%L)s zS6Axa#T@&fhpo9nquiL-qy!iJsYmwHMF+%=yVA&AJ~OONtRxni&6UI^Olq`rdm`(J2+h~|OS@=WCC@cbD=Svf+jw$Ld_zw&RGpz5ADXNY_$~C3fm22Qir!I!4_gKC7Cp|!#3dYqhajz*Udf~y-a-_jR)8#BdgStYftd5X zx*IO5-jxY$+vF>kKqNLn=h9f#M@4GOsM6-%(fcE$e#7f!-Yr@PH`p=#7#5V3I!4|> z{$uqgQTqB}%>3XpioU{D&^gB4cpO<0`iQxR0Y!y)T6wvzN~?7<&@kI{OjQIH*GhR$ z7KD9<9!euTmIT8&l7tzJ@Id+@>Wk18s)4-c~$TF-KEV5$Sfs=-=8G-Vs z*tep?TM^x_w*M3`T9=c85OLebEq~z}HeP+J!SvWJ%8)RwLcy1fzl7=%XGm;HGQ|m& zoPpc>zM1!(Dqp$0uK!xPl&bc}T;ay@Wc2!tDkGh#;Mu9ey>ht7@qOmdB z0B7Oi5`$3>w)UVL6nfe2VuuURdIn~?d%7)cyo>nevm-H@KNcJd{r%++1Em+ocI_He z++$tvmB|>h{gm+^a_GwWj#qAgtE&P7Y^0{e(=|He=7a$E!6}+=n$7-0;QB-|MLBu( zI!nhP+XuU(aC&CH@DHNTyJ)sQ2|NUha*Y$m&8Gpw%2-*JsP8R7yEBoId6DkW3*-90 z-g)7q9SEM3Q|mHUQgx4bo_BRf0@0{8$2O2Dp{XStK>HNU3prlEyJ*Ru?IJPS<@W@N z(3@U_WlI!@I#fY(wK|P-E!p8^!ax|6wXmp|{`3-hgFV?aok_ETG>P4Ij?18``=Z$+ z+k5ZTOGF|%( zkndP32N;l~OL)pn$$uOEvHEBeJ8h!+b#O#`aP4NZJ$3vI?M=8Iq*pMO)owEQczoOW zbt3VkH+O=<6V$A40W$XQcRc<_FkiAF##nNNxVE-h{hNfZQB+BZo?79{0Z8AsRl%Xu zN{`A_qR^gcRE!}ss|7i>*|Dw8OE;<<9kyE=FDugV^PZMemJJ_Rx?i+>%L{QoVa+zF z$?k&a0`oY|xKXlR-NMT^36ycO*RZm)V$uhLXgq7RD@z4ZN~4cBUHHu-=N5Cz;UuIa ziCm_nrNVs=tdF(%f7d2=au&1WC+dH}mj+U zI`ESD7iR`+y6$DWbW457oer4DBkfbo$aF%0u<0^uZ@@8by)JjI`ZlSSEm0Z6jzQ2_ zl}<@7%zRPB*<=+zl`FO`jCl$ZO^$OjD?z06QKgL~^26z@J9JYm(f_g+Y^D63n~S1e zKYOGrO@TmlVD1b&3itVN0gVf|EwrwaLtt7#Q#XKC($_P@qfCcc4hydSQI4%Z<`~)m zV?8UKi(6p*MHJ^S5RFh>40GZ@Itl7NO;KdYKHl-!JUB|n8~*~>W_y1{sj1NiJBq94 zy2E}9PrL2N3U&l%r6rnlyt-^Xp*W$+d(s-PzM1Gu1;{twX{G0^-0z;xJX@)f=e?Qd zXj2;#W;un|*K}klV5`;n3WT{i&s(c@8wEl{p-)u@PYh+`)Fb-)_~SJHO{Q2?S>{aI zQdB5wk12f{$=$%3g-vP3DWKtNDXupG@KFrwH`S`UM}Z2A){%4)fOnWQ^%hlauhaRpio%AT9JQ$h$nna%@bW@&rJs zY^$3U3zrzyu;Id=D!0aJxu@bH;QNjG{63F*ouGI%6QWqx!a9=>tDodk$ybq2LGz>R zQ^(2M3|nHu9re&XPSD`G5(%=jCK`BOL!SNw=6hk)9%0h3CxOgY2rl?Mzx#!=&ATvR zfx6{JhaU~gu@5PT6Tcz}vQFKG2{EpSqM?#B`MAo|RZ0bY^rDyiJc?Po1p0~h`?p)u zI&Ew~L!*ki0uXBoxm@NG?cKZCmlXN?N{s>(Wz7wBG){=ZZ*6R|F*}cLuAkYnbZEb& zXuW^MT!EA2DDuDrbM@_$?5E7xQ{H$iEk4=Or2Ki*CFT}pR{%g8th2E-A+~;;uUD_-r?V<94+UlGc7)S2 zhPBVg-6oXM1?Nautilx-M+Xc@Ve=c;PR!0~=_xzraVbviq3cl{fD$gdRy!2ymv`z-$*<>$DvU)ei6>z9mh z_@<+&V%aHuAgx~SF@TKOE3Ch7p8)rzyF%4I9r$mjk!{FxvF#v37;Ib*@ zlZi#%<)Z_cB4CkY7hbAMv4h!CB8#2Y_}T?r^TW!O7TpXpmJjqaG^9Oqyc;7+x2nWZ z%?iwpRoD-QT-D>M=dupBvk6dyhNAaD~#B#H22|rp5x9of# zUXDchyZH18uj&2cx(<)2T$UL$ihdPFDijhE-!+%A3H_^KWTd6nZ8P!D(dK6Sg@U7| zW|GT+FBFEMm4kwkbMUU<4yZZ5l7NuCj$%9{v~<2IEAuMj-9KS;>>W&hFqZ4^3nUTp znL6|t@mWq87YINzTfit;R(0#MKRLjcPfG<8mzIZ9mNtK+9gQ^k!rkEDVm17HpK`PN zq2KV%)@`bO0oPR%eK(s0{CX(p?CwR9FJh$^A*OdF=`zbey3paqN0=79A_$d{b>kGb zt9UmIbkZdHcaiD=z3#>AuA89wr-;p^lPw)YwQv0tJeH>o4*_{^a7rPw%?WZYWn(E#f$oLDhVz54Yz8*57CoSXty+u@`;@yxvTHKgMb8wsNkG{_5n2noPNR=82a$k}CY$ zE4G(juQFbPz2fHUM5+FW7!?fkC8MTBZP|1N)``7>i?glCr{ei}_)9j^q`b5ye!`6{ zap%cst#yJ8mVDI+eI54=+{Zk7`mi(E-$^CI`$g(q4kQl^TwsL`x4DizcYCuZ9FG09 zvZkkj!vnd9jUhHaG>NDt;uQuPLj>E*nsK(C;ui6yq!@$4zzF(TJKjUZOew-#_%;pl z4#&+ByKv!;e}pb#T(Ngw8K$+&aN}4>jLiuqUvVIL2EDe8H;IypD7Y}WbZJrm&$<~| z7Mt_HJYMEF%X79FWldg!|%MiZKH_#>vBB)wfZ zIFX^YS~;?8Xr)l@^N-zwA6;bUw_wC;>Iz-6l1!AZLKPHkNvQ%d2=XixeM(|Fn`6aL%Vk#JRt)tT5WEbu&0;4im=j!`iqf`Jp>X;#9?7 zCJlqlqNg2{vW|%g3pt3W%LS}|s5Uo@(Mj^i;cpEjoq6hUfiHtbZrv29C^lLw|BO?b zTd;XbmkY@Y(|m#Dn8kx+i~4$eNBb>m+H%jANs(C$(xT9)MyKuqe~gB$%8>XRE7p#& z`LQ}c&}M@Tt<%ZBlfuvWKj;9U>mzXLm&74tl|P^!hx4W;A}8WBuzM)4|M~H!qxY}U 
zl}$zBL-FAf>V<)Fo~=4%7@LOPWl(KixY}h^b3v@Y`|Go08=Zn<0%B`1U1{}s`1n+1V2gv-z0kbvziG;<^AIAxL$AXM_nZwB9a2k zN@9UG4Bkex)lFhI55KX=q96%9-oA&-&XXs2rXxkE{Kf4-c?($H$KOrCf3;L5HS-m^ zp8gnkbVOBAjq|DIgU3Kf8m9fDZS8C#O~N|b?D`^cn3xr%#puHp4x?`TvO7b18HVZ= zh!V#g1BfWhA22*1#CYpNvN0=HemI zVaOpU2)w>eZ#|~yBE{8EC}8bE+d-_ucOI zHZVj-!2)(qL(ZLO=04F7l+B>JmlgErd^)|_pdvAVIG0L}6~!P=3&!sCnnAn%am1$$r-zC^ zP#fIR4YKZfnFq$>K?C`l0T{X0G7jt_RnkYFU}*%G2Cdt=`-by^U!7)Esv$oZHo)uMUB>@!-{S<~g~{OX z_&Bxe*8c6@$1k7Z!X1sfo@nlVL3`IKzfgC{mG&B-k9W=NP~nm5o3(&ld89g}YaCg) z$cR(55zHi=ERCWr(w&U*?#j#4vHJ-71bcOo`A;!| zfp(0|k;D9Sw4C|wOj(M#HB=*u-&u2dqo{Cg9aH8O2$9uBiMIl;K5zM6QJ&G1dv{L= zbKj%NY7q9t7mATzlbDTqS|(C2_6W;F9dP!inL8cOiF_{p({fEPt>EcSt$z0#gnHd- ze*-#Vm}l&Nci!UBtmKIoBd1cRPt|o?SrT0l4?O$(!gwqrXoK9?0m^O1+^fcOwU1rr zr_|V436O)F>W!1l^Wm$?mBk49ubj}JA0n!!srxG1*ymaE9G0PpT5;svEJ7Mv`+Fe^ z)3BSy5{@XzXsFf14p-Yde+s@eZKXfRV&8@RGgjDTH1@k>JZXd1E^bAYi9m+OkLj#?4tavog4bhx&NAR;s6b`>vF zPF-apS-Q2j2rFy`GJ19OC^DKjH?e(nGK+{sgz6N+jOIljkzkK8AT0fa_XziW!u=@y zvsbjR@oVqO$<=R+6_iD_!n6z7iYJzj*B*wNoUxBQD)_)P*25C1dK(&kB^^LJmw%`iw!J1g(W_~_yyzc0N6 z?r$-cr6apWd-YTa&kYAcQf-d}L<~BY%B7rgP0##kUn>4b$guhvoK2t$sEXhUMgxxT z8wh#M&(1o42nqO{S zUf64YiybQ3=)39M}qcUBQAb%V2I9$HsI<>VBTlcqL=98Vo}`)(4DEi-TF zrmbeu+N!!XevD2*5P2a@6aTS`;~Nd{N6UcEJVC#reGYT4ncD+k!;ti~-YaBvR%-fr z1Ecc<^G)W3W;NseGDWC$cx1F)>Sy_SYWtGZ(iI8?Na!jG#bKX4%SxNMYfy+XISfsY zzrk8J4++j|(^itoKKBft6SZdVd8*iSh5sPfr&h6zGN3f>bN>c`d{)A~8R_@m81Vn* z6g<|~9Ps#@qGRG8E;Cj&7vjS;CTo0Ick_Qn8Z|2K*wDyPa0FHRAhc+4pPGCPBQLh2AUZmDfo* z@gD=pz3~JF#hh6{AY9?$V``w+OE^Z3{**)-7|PBq*!#)WG^ksQF%~<_O}&(hN)Z2) z4Ea$w?9g)NzUT{4ex@@5tZFjI$YWcL!HE8h9DDZ5hxL4-{}qak`B(3zP^~S z6*B9aA=Gb9i;gRgn0)xAZ2{N27JW^^`0FUPckjq)#=0DPe%#%8z`kCRSF_91T_3SU z$Bct;l8zw-|xq^E9<@hz_4=6wMy+!xK zbzuvt-M0yz-EZ^~4l54oI|WDP>P>VEaUQV=6n!@ycjW1ZVJ%)8emB$!i+njQSD89= zHM!}&;?7qB{(#RGS5<#pruzGCkFl|-y>;G0K(@ljW24NLNhQL=ESf5fz%0O4U=@&u z7UgSdq(0o~v0+v+_pL5@*5VlTDtANBWG_;#KC+Q&*twq$3_1>e_jIs2 z_51^%(xB7W-d>$k4meeQuHQZU#PygYCVAAmblrX3lya%57EmFe@wIf|5jECq&CVRl zG49fsjOWqyxM)AcRnkC^3&jU>NU2byWSb9Pl$!Hxj9^o1`_`0_&6X{n=j>*h>+=xh z&-=KzB*kjnIz`pm6ZmU!#w|&A{i3c$8uiC57(Hr@C(k`kPN8~ zwS!PrXE3$A;^xQC2fwFvWMw0qsR>=bev(AqpJ2PeG6%tw!cul6Tt#b0m~Tz$^1}I( znm(rNkL~VIB~F6#wO-foikl2iNpE{OUd5cE{9=pf>o7s@t5k14B`{}1oSHiJ-a&$- zg0ec#K>-z=po4TXlbXI7lVsh-;TnEw93MY_>Gl=$C*1i*PLa&{KF`30N?86LMRpezgG z)#?2Hg%Bk!Iuqz>+Vl<9*4w`#xA*WTS7l|~N!t&N1AmYj2mcU&vPwV4e|PUu$HM}( zPkIE*0lyk|ig#*e;~i}$BF;a%f96`u%tTO!gY3=S{&3JwKE0*<*kDqb`z<3=rrW~9 zT}2t=Cx^Ne!8j8nj7~#L&NWSfQYt|~qi4PMf`uZ%GXqRE*RsR>)lE1zMZ+{-&z=LL z1d&;4$SEc<_D{{IZNcc$gS$R-R=UgA&R$zBITYGa2nLip+xaOAjRsAi#+QJthr##%Q33+6SZ z7JoZ9THUg%9g33Hy~YnRYRaU_s2#7M(>uD#%Z?pYEsT2RBCoz2}s&%YAw2qOK+Rf@8 z!VjBv{S1+dn2x~LIFE)sx_=GmcC(1S+8FN_de1e1n={$VeMG~>+lXJ9TgO}zvck9& znkVj_uS4GK{kh_)Y2jseaE_t;<%k8tsIDSI=P;Qb>>Lu~{Km9=6hH9chWqG7NsBA; zy8Wij111j3=DUOfB68-dDmDMTy#E<=xts9x8zpb&+=YM48h=jn{Y;)doxj#*+Ff`0MA`F(hv6^t*WbfwLmV}Fqvs8? zJAwX4(@rz0DQmHKA7;O}iU4@d%B%~1__}ni#*3m54xt`=`5Qbk_*~GL!(!KW|8==U zjqbZZw>(*DSbL(KuxT(D)xSo@9%ZDKi+=OZmv9d2a4=0HHcMWwO-gaKCl<)69n;Z? 
z@*k*k{K%8I@J`n3eO#Zu?Uc$lYqMwf)JLG#B9IBkzV#OA{bU>tw%ABZ9JTU5lY9Rt zW|SL+0RSA|4ZKlDf2kNER6rvo653@;?BXQYy_S;LJ-4uP|K7eS{Cye&4Q>9!@sd;V zi^Fz*yc|dV_o}`Td{u&Rbo#m*JTr9-v)o+Z?PX9FUe8Ks9qeq!p2)+_+YC|Z(-EZ>H zQX=;$CX+@5s}3gvLVzAhpB}NN_-({I;vovTGs#K z?C`F%y%?$0V|X@x)>CfeKdH{bm(lsvSl^3?Eod|4%)T2``|>>0>g>n!@cef}04224vzjR%H8CvF>$t^EY?cG&x~xmL`RKXiU}HX9Le zMg>Z*Bke}>H>tuu_2})lqM1kn?+6QhgW5`OhRQt2%byW)QBeHuAX)6<^M!_ zA^diGcYnW=u|(f^g%SS{ajKCW92|YWx6n20b58b3WxLuI{DV`TwVM)ChIMI?=C~k( zm=x13IEG%u3?w0~UnMJM;+Md*M)-BUFqSo2$h#*ppK5KTP2oF70ans~2H*CB;NDjk zfx^Rhj*XPHC@Z8fb^P?|^b!QTT|<7ZXh$XfZT@lce5G6S;60*`$;xeoohK)uTNWj477ARa`&ikYQ>DXRnAT~> zrw4YRRob-wc~=L1Z9X|&e`9XTa8*sqt$g8+?-#ipRrKt+BMy20sJ#7C1EUyAuEss} zrQ<{uDf*5g*tm=$n1N{|F6xgdRP{jp7b_<l;T!I{6&YP;epcE26iolS_Nb!O~X?h%% zr?pi9N(7(^Io7B8hCD5VuDHA2l03JMRWSS=7e%aNX7le;_h)M!Lk% zq|oiVDyxR11%LkJ_7Lf~)^4BfIR!I9t?Pc*#!*s4Lv%geD$*QtmSWBeozI_DB)AS5 z%TLpN;(k~a_90r8|0)3oW#&{sv`CgR&gkV+1W8d8j_78faCP7gkl}9id{~?ev?H)j zR1Jx9J+{*Q^s`T(p8>Rs~a~q(JRB|2IvWgL5&`8l*Og z0GT3Iai4;YU{fNX9{0*@yf@yl%rg`#tB(<1f~4SDOEot>qSBzp+F@WbUrdkSD1LM? zVS0_}CUEP10~yd@XTWQ#{|z+uX&C?FQe}TxvKgG+KH0y2_b>vWiLcgVO}DYi(l4fi zv&q|c`jZW|XZ@)uymcw_<}BRGa^q* z%RF3mtLl@Gcp=^B6dks!oIX{+e*RgPd=Ygl3xqj3RiBi~YnVqpB1oU_iAu7U!o%aK zZxzmku)F$sg}J8d2`k(C)mI^nWo31O_0yv-*yezI1w14Ya@%Q;3FL>U)3l;NQ4nL z-yq+<{{8Y#$XkE=VO~D^&k(Ncp9c-Ks!(c8*6m4p)4t>h(hnR*hn^Dj+4Esskl5|TmJwt0y7qH?Mz!Y6<_)rowQ36WdpX0S&VE?L{($w}+C|&0Qe%hquzYYsq}i2SaGIlPLxhRt*nBUK#vyTci_Uh>?`3{_ZhC-qy;OhQp=161 zxS_ymh8__6Q;BdhT}hEf8L4IQUzAWHCUF)0N2Jeu>7paP&me|~m6h52P#B}y7p1xS z$Sn2Xr2RS)e>SUMi_PVZdxW1HZeFjOsjU@m-4Z7xIWn#{VJb3|9euiYRIY=s^kexm ze6LI@xeZJA)Gfs-UR=Bhor&zB&hHh^LnLH!3q;{}@dgMOV3<0w?@Gg@XCnr*w-kC@ z1%f1gmVHr+n>o8{ZWkzq;6e6Oy*K&S_vSqASmgV6hgq?^t*Set>a#|#=<7$3vppTM zc}7}Z5}Pav3ha{la)tZ@6vMvInw-xASk|{txh#%0h zR{xeHJE0iO>+bN|SitOu&;XHaL3cW~9y9^TG|OZ*!@?uFK2dIIqVwxf6Mjlad8(; z?PP4>`Spi4T+Qt(Btc)a)G(#X{v^*>$fnwEZt1y3UX(Jb1Zcf-1Lo}YQ2 zpr*-$jfFy7UF92D&U95)&k*ocSDfxyzeZ)#ABQ499I<2X=pSPTZ4#EKl+hrFw2(8g zUJI@{M;z;mOGDxq3^$sgCayda z&cyghKGRI0_+jdSIi$0a6XI9~Q@pF&VzP^o3+{t!4{?oJ!IkaRLE)W?3#{t9C+oY` zc86mwGS45`JwTZ56BcD&7iW(qXShVD;$xrkn1gXidfqP`?XhQKOB~Z;vL0_jU{G&R zfY1APds>Wm*286hgXMul+KX*)8$bU_OUr0Ze(iX|RefBkY^r`n4RQAQ1B~oqcKwx) z`QN_YJZJH~iS3#vT%A?Q(v+MmTe-4)5b4SQSn#Om8>=!X)HIYDYBC}{vwWnai4|$Z zA}-!%!90A4QPkD%0mJdWx?87FB%dYhs(GHjdyVk9VD`y2e0?}O|Kf86bGx}kRg0gT)cT4{|MD4(D>HOU`D{h(WckqPTqDav&pKgYpej2`@>mR8BbMmViM=@bhaWk(6-H z3Pq1y5AWw6cZr)T!TK$g2>!Z-kzH47CLM#W(V|t+(`E5y>(1)9N~gY;m>)*mC@DhB z=z0pl35x*)_A1vzd6Au77QwIt6JMfVGg{#%W=khd)MbN zv{F)NjZ&U3%j*F88}AB1UfbTm9pF*`n%;aZrWPKedOeVUyXWuMUqt^-&K%EokfFqm z-tGhai@a_5B<-vR(yDVw63VK)gHE+OPM!n(yVO_43d%_d>fQZ2ext+Gze<&5l+uGF zSO<~)OZOs}qwhCf+cF^0bZNXj+yy$9iEYdmO>cY57gy`|G#%hvZ8^8={$d{0SaX6r z_Knpp&)}}d{H|}_98v^>7BI5dq63A2n`A@E4zF+vj=lxz8&}C6Cx{V^v2Mw93g`3W z6}j0(pex?+#R!3MJ0UysIF23;xYQK%#^w^iGm;EDj&zFrkP#Fa+7vuoiB+XPoD8AO zPcwoEm>i>sFoE!k32JqP~B%InnT5`cF?u)&r%u5y1fD6f z`J#tZ_~j=1n@96_yOtemdW|))!k}xW5`{hAz+r&oe_!JFZ+3JMYG+lpcOZ?NM% zwHnK*Bo^r+1DimP_6!fDGU@mKgr`2QzC{=9uS-O(ME8uNi+wmh9E^M+kCjMnl^PslPWmGO)lMWPC7U^P zR-Rz_))k&lFVAxr(V9HRZM%=U1t;ZmC&;`u{1;ybvpntckn6<>9sJ3Y#c z^I1A53?ds>|H)HPOUlX>lu*g%<|I%CkXlMmAtc6Wm13$;i3j}WzIrw+1-^g6Pk-qsNDUD`GOzi*Apzt+X_?kDF&X>t=ULjM#~n z8?SKDWMoDa`6-jq^5fglLOEH?2lpXPD(C!fy~5j{UYpk6sn)-3G0tpqtx<#e<3__B ztyjMtr5(D;U=KpV-y0#Ze1R$PL!kaloiM(nv0IT}TE^n&gaKo0R8A@L zFU8i2|2f{h^Np>Br2TM1?$fd~R5(s9}9@!0Rrs-)puVu#?k;xm# zK|hepi6D1~82--2iB^oipiO?d z)^Cmlqp?C#%f!s?_|t=r#F-lO19L!Nj~!35ZDU<`(v-tY`zPA3Z!9xGYI!V(@3xO~ 
zUJsnKLXm!NXZ9b4)FFl!(KCqegb;;>;+T(y>7h6f7zOmoM2Bw-4ERIH2%4fR}saSH@9Q{*J;8ihK^bx1!p$)LE5Z*mA73cYA1F*W&& zSB4YBD~z78MUGW;Yss;BKJCBSoa-dQaVKroG7=hjsFZF@(M~>AXJd+F&J~6VZARpz z>)n;2+ZC-(#>BU;CXB$2O(_1q(CIX=Y&VDAnEBYq`)Kj_jhZi<6tNL8q7>jhd%3iN z;kvgAKpwPr}=g%ZA;z9}A!Z77em)yxn;g5&0Kc z3dN{}MqGk}e2N`(i;KheZEv^hTV1cwudJ%%p(+U|ncLpXw1{|>z6B|YSpC@SU!zmk!)n?RdF{Lu^cmLiH(9Vc0 z=x<^Ftq;Ok51ev)hWYUIo=rh&hr(+zvl=+hM+_Pzj|OeFy`~Pg zTfP1a*;hFPEQIXQB!4dE(2h>-d}S|q+dpGX&vfv18i1X(k8^h>v4&7SKZpDaB(e3{ z-@lI+eudlaHMzwof(W}ta8G%y=149`hx8abB<*V?-IUk$wnOF$-bj-_^z3beEKIlr0|0FkBhb-*E+4@54hl zWOZ@)6=^`0aXDA`)$mYIzWPvf*XxXah7#ykHy3&t2l#6oxWMBB5JX$gPfJd=0qA+5S+?P$VJTDN!F9Mvbe6O8RF&f21ZS__*Wg4ZG;<|5Rb}PPlx=a4t zIClsWwcoDHF7Q65C=%Lx4L(!NB91Slec~6yAq3E?6x~90>2g>mMBUHk^FJ}-W_gmxi{BbhzV3WqK?8wM!YTYY(| zejR<=YSTfXr0-oNC*KY!&|?-r=tJT#dZw_qK-L-FP|Fdq5{c{ISqKR@)3!$KI^vF^Y8qi7&3r@Q5YRKt>Vj4+HY^#jMqFB+?X${c!IiU(tWu-#;+O){(i~jG>!cY^xm{tzI>G8z8l$r8%UzZv1!< zRn)bLUM-whpdAn)s%a1VOM6ztgU%J#w~A&gA6ab6zU^Ef2EZz7JJ`RudP#E~O|72f zvmvndb)-yaL>*ad8I4WcFdDpGXu7??I)S&ys>A?XN_kvB#WFfOgFug)5{l~Q2<8b^ zbrC#?l)SCye;KVhO2nBeq}egzSpLUt&2+}zth*EA=?7{}c{}P;2QtxSABG8o3q{a(}*bsr1|XQx38Q>OUm3?ORna$jw7RSFCH$7UN4+K3|xP@c;8d> z&^pDzx&8U1KfO6m2s>E#>t#=kZdJ{`_+w}H(M0KFzwhy_YE8En&*6pc;!lyQncmVf zvEp*-Qr{`23p01p@PKB6)62~l5L*I=sNkEmVO+5YQ4>?3mj0~(Yz3R~$9A+0r&TFN$qSP2sh-z72#~097d7&9pY+G~ASO5Pk zK!+KKoj`FqR+2XD&`B(5gc$Q)Un+dS{UJ+Q*4r)oQ4dYu!DplUM*p|Wr3MGzX z%zjp9b#~I|X%$R*37_UbeGtf)m?H@)li>1qY0bpmeWI=^MFD0FRgxe7{_Z{>GX0CgCoICEgE6Z@^>buzZH|gTIxa1pA`xP{LvJd?jD}Ij&VlKW%Cn zTPLoaAU-ibprGh7>7?nRxnNK{@tFC2-QmL9xd=9dj7u%zN1Vglnxwj~)-ihZoc%`W z3pm#8tUvbF4qp<3kR_Ba($M+zjh?Mjxq^&hl5x!lVhRu%ZK#IXDizWpqey!~(2Olk z9mpR?u(reki9=If%PtGqB+Dg!FNd;eDhIMX5#zvM@$;?6Tf|Kz<9t&iFE5dUi%W9+ zHclOnh=S@)Nl%$mg$(TNl+@$!LtQhNnHr^VXK8{<_@$q@=EsfYB}>kjBS5cl)C)n0 zyCBYAHrZX`s^;Q2%^;(U=2yE?Z+$>1E2{&&dA%EY=&Jkk*7fGP!n7kL_$PX4TsTDP z;zOEb?qONdGrxp-DOlvXu%cw7qO%p(h&QKXC@?B7WO>RFm9VnW?o0zTQo)5=CjL$_ zcGp5MQh-@H4jvkJP>BY3MQEL+XVoyhthQ|9kbGu&qUdEmY2!g#jX)0W?$VfuT1msk zwORtIC(E!w)A9(o-ict^dAj>SMjLb(%j$ZjI1*Hp&Wcpw zxT`BFkO-8SE?M$;0I|ICPbE6>SUUCk1aK7nHav1_#Zbt$)&Xb~d;QMxm(C7>MOv6m z8w3w7oV<@i9?*KF87aLJ!4EUncV7=vT5#5Dss&Za_{l?Tr2?WA(+VPJsb)AtHE^9) ze>X59`u~*f98pmd>vaUe@*gz0={~d8`{UexU&qA*B?VOuMMOvOHoGLxiDb`ZGMY|G z)^AE3TJ8Ln^UghSi-FL(F1&^(?GG%t(BMds8(mLMe6o4#?#Lf|7#*+C9 z6p*#VkaZymS!BM6RBlNzJ`S3xM}Pb$@G9stzkjcsel+P7jwhikc*+Q92zvf%Q9}dN zyUI|s=Oh~4lgvPY&6Q|kDZ3i#P;80!aFIA85JN&e%T@}mpt5iLTptGq|0YS~Pv=xH zG%Uf7{!vrY76Ce$3zCVrc`RR57%yn!!4O%~FBx8TlgreNrOwfHd!8rI8F;1WJFSbOyOIe-+8JqzP(_QZ&NvN3jHH=F+#m+ z{&$v7N;W26u|2_Q^UxBG?nM#XN}gcoS$UcA^X4s|+U7=VOohzHzWvPkCN`TnT&{gn zFCo<-a+AhLTp1-tw2b{2Ai%O;^PSZu<L&@bM_>FH!_1k3aN~kFo{zQb^GI>6mF731#OWq7+$fwQ`;O2&O~h5#$3S zy^jH-^?se>2+DUfzakhnmwLv!YCnxUuIK}cB;m@<=B$2aPaOlE&y+^S6l&$Vu{OvR z%7Qao%H%#rx;*iq_H84S7->#aJMObz0P`1=v|N=x!ICa4fKqkV#^u1BapEp|vFphf zB1dOR09Cg`3-@_Y?34p|lYOe(7H1XtE;@Rh?g*+uO$JXmd3FGdtNWz7#_feqYMVa1 z{p&0v81%UMGX%N-5*QRs?APjX65yB}U}2Pz38F0qY0D)c<&Iz8!|$|_M)ANG)y zzK0Ue_uxqFaBHZX(CZ}9k=aEmdH+5*d>cEt-qby@$0^OD3&}&$PO60*f3zKWYZ}oc zZMu7_v0B(>t}62}BJ=L{^HTaFnEuXiLUD4aJ|c3CQzw!pttc1HGAoXjptUNWfb!Un zk7bqxt&C0TmlH^xG9_yIo$xvR4E*SiA(WORuEcv(qZiG>Uq0sAM#F4?UtGFQ?%z{E zDjhd-0rQo_NTqJm-?gQI-IX~Yu*fTZTsE&)g8_t|4-x>!~;e~wA|OPPc|?E&Lj;5MgKqF;gwOJ;)$Me~go z1E&3CV(lOwXQxUNPzHa5^t>A(R`sI{9f}KCX&Al)$tT9hMdnr5>B@&yY>XsYwRgS^ z5|d@cY9q>v?VGwz-0DQX=8O_mw+Dmw^;#Pv%|{hf z+QTwa&P(T$(3m>7R#Ku1f4}_4GKnPj-6R_;X(%OR6%&o3@EhUy6mFsl7KYh0B?Ucu z9i+HxX1{-ccDN2Sd4;s?OUZ1f4_Mlzw*tta%A+W&4IyzU(CFB1c%A9Ujyx)FRwuS$bolgt_dQ(E}4`VWU=Ak4KLi0G5s=jT{z)Rx-HGR{t4 
zb(Ay^B(yOh=5ocVXtTeyO!0#xk7zB3Pyl6d!xChV0w7h-5^YTGHGKRK@xwncIhtvx>M-ema8zfn#L=*j4 zCCS@rJN56TBrf2>G@|HmG<~)K5(m~!L)l$RzZF1aPKxfGz~|R^_8;E*#@!Pi*J>ywM_(a<~RY;%c887 z_+T1VN|YyuPHA2bKm+p17>aV^ca5XCGZpX^KU<^)*`#s^M7IyYs}4;e`}xwzA^Fvq zYYw3WivfSvp185G$<^+y`_e1)gVeisvxjK2Y%KDDlKB$&H0?Kg@_UXy6^>gc9v>N2 z71HbL*#WZi6i4I*khIwy+XIG(y+4BqCVwf>DVegO>K}eOXl$*pw)Wel%mu9OSJNTi z-;Ra9j;I6$KmBcufDDcmnIQVZF}L4S@^feH-5I)NuKKGw%$eTIa3vio&GCjNov@^4 zU_x5(na95L-%xBBQv)$QY+2O_ zP|0IEsj~Ek^tB9zWni9-_rOGSCsar(jM$9tu%0g4F>mbTIe0Ux1Z+mS#T1+g- zsBf95Pcgy^Q*ZVQ7vU=1wV8M8_bM+GJK#T(VrgaM?S=aY}Iqw-iY70&nBw;6J}1m->?QMY%5Tg z?*u;Qr!1LjPCL&pER@h8`Nu?^Tt1^ts!~wpe@j7x5~K19&utvsM+AQ&n>oIb-qMOm zmWQjrFG5fc6HbpAKo#NA3J8#pe7nSeW>6k`fn{mRMA6VeiW2H5VGk)&!qngS0V~?n zYHQC`wrePV^egwCx160%v`5>gp{uyLUz`T+hZ26|=2#PfmNj{^7C#Q0J_=losnVs5 zJnr#TYM_ob@(7c`;WnO3{m03w78+lVT-|X|oROU!PhHxKFq;5K=JgiL%*+B6ihGxq zP%?`~2L=#}ERFF8KF+6^X^(Qr%!>QGZR%bb|9q=~dqOyBjUZx1trHMa2CGTnrWeK? z-IY=jB(lWBiKxo2dE5!T9T~o0w5DDS5Pd&q_TIj8eH@+yl=3fZJ}=)rqsr8#oKIf< z&_mxt#o>#zDM!>y)zoo556@h*M|G9=;l(n>OQ-RAjy1Vr9m z|1$z~Y=6s?02S|^LP}~-ugx8IkSlc(24HjcdG5n~XQu+^e((TUU!ILY_TL z5SRosUU}7@#C~RZtbM8&<-pCS=wbfDZ?Q|Q6q|GvWa!5W_v}L)ITIuu( z&wg3#8Zdm66{fF{$E7I3T$YKE1Yo|CsOI&&+DRsac%8!e6)5f8hMyB`G&Foj@21R_HgP1nM{ON$Yl7k;lAhTgt zHXWu4o-l1~?QX++kXyIs9!@Zv8!1j8Du^Hmw$Ib(Z$?y6@$2A6zfBqW#r$#gW-8y8 zOfEML6Cplws3S);a%;G0Q%ohzk9x)OHkr1L9?Fn|A07?M`h??%jy{t3fdx?UUYopQ zzDAokbL6xtoI}Fv+r&-pBM7rT&pc}%IPI6Z7hviZblxsyZGsSN*zmu%R@DGaF%EE`b@)Tonu{2V^}I(3)SrB$7hV z`44XY8_|WMju=#xQ!R#|5-*!NI+Do8meR3AE_Ez_LVBOe!p#^Cydu(yu0MW>C}O|V zvHILcp$sxUAT$CQ9q>7*+dDY4gVE{2MDc@6wKcL^vF)*QO9>rps4TR(fEwI&jR31@m!Xb|}FeEZYiIf76a*Z%c zy?)X^U1=HT=l)*j=oXxKg8|F;vR!Qru5wc%I4JYCbYJm?CDWx|p!yuclt78XkK8_e z-(RoRcS?)1w)TBq_uUQ)lS|6{X6wa(ZO;OU-SIq$r|`x@beFGX+E7@ZsprI6Z}9uy z^hXspccU|(Xq%Hbz2wF`o%G*+FN6}+xsDCt7N8zz%=!93qUcd#m4T;eS}jpBKXMh5 za-3ltJ^&5mrd=$UXi?_ZbxuDTx0XYjKzxM|24+u{vuIilK+UE@vE&eZUQFngi58F@ zuGh->;q6yN)ZpO-(l!HrYeul<{=3x%n57k9zlm{N8fNa@FV)uDZG!%K*K$OO| zc=y0=925S&b8@xKg%vQRSqGUcXOI#br_tCGPg`*PUHQ8;y+*Np1@NF^ROpAU!h-gV z=m=48!w*9s$RWMHJQFeMnZ;R8JcDKxc-movK$mO-Y`{fA9^E-qNBj%RF}=&h_b?8{#t zJUcUV!@p6sL~%ptWJUhQ99zfu+<)?ktoAt}dhY1DyIa)i=&OF4?3xgMH1v)nd%I9P z&wklA>~R2l0}xVC{7HKOoI#t}V?7a7J@*$bDB*eZ+jO$q{>g{=1%8366+WVLBEUK; zzvgwv)LZZP`Z)Qz{M^%Zg$lG}?T;52HMLPbKe^bhZF#JF!~??ftI*FvOc<$}hA#%{ zAo^9?V1T?;Ru*8?Y+azKsmUr&B0oFj{6T*9I7>-4%h(Mo+`O5Is+>=wc<6WxZe;gL zo(5+#Q~Ue%q3L_=pGEXqQITZuY9XYvbr6ktL=ZHjs0{M#TlY?Wj`ajlJHuFuOB$1r z(i9nO6fA2pqB2yVCNDNi{Ct=&?9^l7q@a!q=o3%Z*Uh-@ES=IK229e%X`0r(qUoaS z^?}4$7J#A03KD{1QzRl(O8QquqT{_r%x+UPHZ}fT?34pZIkyX6@q_MV0{|{LGDytbjS}8wX)+OUrQmq>ya!EWjHhmRDcw@aR&IE&J=y z9BjEH9(!PNM|#ShjT$g*<(iT-F)+Ya?$6|wbh)Z&uzPe<>~?nQfffw2KKWMze5GY| zlY!>Lf^$e-Letu#kd@VOMP+4d5xqTez6DB9*^CqMbKobH(6VyZNJMRoyb{9XYQ648 z=iJ8996E+5jHni`C%o_N7c{H3(I;Q{t*wEDAFIQbXwtEt?rZE6`iEWz3D;6cEfV`J z=?NC%&yCw9uJm%?jA3joUrO9wu4}zozUhk^L0Y9`C2NA!$IHQYqyZ@+NeDWRk?Sw| zq$eboXRnoAD@~vG(D=h@bYMznlX=>1 zK{_vHyS~kHJAHkF-{+A*QP?dK9Cg2>+0}it#AlWmzekaO5tG=BX!scYhS~|tUG(>M z=zIDM932t97-Sz>PYfT%)cH<7ga|*+10!1Uv3V@z;)?M=wV8~0_3*&!mc@>b1|O_C zM=DkX%A?!5zlWEfJtsWN7OyW=!~}zDOBREB9nqG>MLAyna33UQ+7$4`d3bo~C*V)} z&E2TG`wP|O*?BfZTN0W=rJGjs7$`;=%2bBfImb=8GFge`OUkeZo$;dwfgM6z0e~em zTEtLc3hft{AePeW^3IHmL|0Qc>pWMMFHf>63^0n=ZoYqgzka8!tDB#|5XWK8nmT1? 
zGXkNA6tz~BT5yWvfrnxj{{lPStuYnD#B?$-Y&&8&<@0Axl;BovU3d*IUwoHcNH=(* zSOTjm$wv5oY7+b5iC`ktiDks~Ap<762TzPdx!#i*3)hvduF>H|R)bkHD_FX~|I-n6T`GxCp?9K#t;L;Z_E!17Zw~u&FIM}$P3C!rb-|r|A3F5NY5>NvZX!( znOB+OWh0nJIa46@?cE(gc4j_^y8O7@B1p1))x2g&B90=&m?ob!%IsSjo*R=SMb@$L z()=am*El3U`m^>c!5|n+dT8bOm1OPVm1Lk-EUTq93dZev9N7lEQJ3}Hw%96hlUFlV1f-S6;ZPz3=2P$R_T&*Iz#j!N! znz?%3VtyqNy4j+oA1*Cx-)jaaL}2Iyz{iB9#1+ub99MT4s3(#;m6vV!5(XJe+NK<5bQ z#WXXERox*-qS{eTqhS{k9;|Z$oMHb=L-sk5dhur)Y9PAOHB5+_>fJk@A!Vh1qr{X= zDF+ybd3kt#JFm8CrZCoTqt6<1q~g#bS1pqR5{__Ro~T%}#MyY0CHdr=8%{a>eLy?z zUF~nR3@T)4&>0IpDC#!lE1tHl>Fn$_#sY{443bQB??`~VV9Akgyg>tP;J0~p`|Z+o zMN@Tbw>^N)5cYUu7GU(-f@8oj9X}}aY?2ktQTsirIapQp-8-~XsJMt4b!4b8DvzL` z+3AzD&G%d)8o;LU;2E^bV{NchRK{Rlk9^gV;bR)QP==B8mFx?kV&Uy?4yXE?lvRC=yA^YSK77!gmjAy6gL| zFHdN}CR=tQ`I~i(e821DmKdX>1z*5*U`HoUQQ#Zb#klDv54aB+ZH$-r>JSH|U~)w` z8X6jOns3X<5ZJ2E ze&8tEnf_0w{bH}l?f693BB+1;rO?n6D*;e4ubOb^)zqC{p1pnaiBDhe8qa>7KK9(( z-FLg2aLJ9h*vD!*^~@eWY0P2xk1Lr=XkhLO*}iJFcj~z8@em5&?>=4aQUvU?kH2UG zqy@tW6pKEKz1^M6jIz<9eYoUflNBwwEtlDu#SMHXaeGTTedrW8td^K>g;-X`n*N}s z9%#lp6epLqm(tj6=W6g&7>4ayD4bJ)qCERq`vh1wgoO96?*nYx#)mH`ER$*-ZwS#R z%M;(WcEY#tC$G+5cwcUQYQ3u)nk^6W0}7>jCtla85fpV?nJ^Cd(0qIeb=P~lXke`7 zn1*(65DXo;Ip#+4^X;b;UjPZcUgM;sbpwWvQ)btvlO|?1t~C8rfC6zfiIJ5`+~O^2BQpu zFl*`S_ck&PY_p~n&7);Uk!w=rA0ry%55IO(L>(L97kMVZB(dJ+PV?sadrmc4Y^Bz-N$XKsLz13JDbm zY-^M9yi+vXom()F(g0QIwfN;R{1bGS?!CId4$sb8JdY^|%gnUlv-yZJ&5toD2_S=y zMhJ^6si16Wx!5#VcW3Lg<9)7QpeC%#KM)Rk{%|lL)5Ozv_rT1Y5hOGal*N+H2{8s~ zga4hKDT4^k*4&bTaz|3q$R>YsT(`;f?pDl6aR18ip4E2UJGw(^(DM{Edt*SA`+mO5 z`(w*LoH5kf+gr<~rHX17*W2ynzaIC3olmqrva#wZDs8AG zazKISrnSh3A1};S#dP>%TrdVH2}|}Quo1!D{#0nZ3HZ;V?K5eB$x{|I3od=5bI>4K z@P5Oxje*-Pu0S=t5GjpyMPzO5|{BZLYekQ$bN18-doEFm33EusWbSiq(%5 zPTq4x_&2K8lyCRiX=JWk^MVk422#l_p@50Vk%m66@o}f+-Qa}OK5*<>zetYu0i>Nv zCj%(5C%Fk!LBP^(@9H|V{N0C4J;{PY$D$PqDgbx^m{cufka<+x~1# ze?nn0)IWhP3TR1dw276^NpeGM1>F1%sw0a=t!)fNlfXsW?5z2M0!F7#9O+CQw&C45 z(RmhHMSPwOmzC=ZmXSqzVRr@xRA`9y8Rv(|29nd=*J>Q{1GRFUmx5SzX{*+AmQR72 z=KfDO^To4ccEPO9e)QWpjZ`qK{dUCaIiA@k6_%yb&FU#*oaMd8pxnN(u>mg=S)A>j zHO#Tmy#_bgb#CIhx&BBg{Q59y4>$(Y0NwCEHquP{n!_MUx6?t>bQTLA&ud_srbsBx zN*v21oy|$p1j}|ok{FL>#7RaJ#kHA{zMO&Cj*F`6WLeH^?js@L`w>A6?Pqz0VVXp7 zL|K(Mj!m=S2ve?7{6CC>I-^kG?CgvvPDC{jg=Be(X&OjUMWf+~RHsT}Wp$M>Owe_q z{(to75J^_p+1cjsaF6kLOp+utn{6D&tywD--EN00NeIG#UZ*DlW2*}^JLl}|97Qv* zY@7LPP8!D)MTTV?c&OXhq~G13%vFB-t6%4{pL>~pzsuSAm`mGNxO)8tpM3E}+T9LO zlCn1FGaMeUd;ftbQ!oB7Q5b_Hp{g=jDu};f6wz*r-q3M88V!%dBH-+Fh@vXAnr$pw zL)Z1%o`+Ob%w|(eUBhd5P*pTr4p|!G&wNbN!nPeyRVajx zu&fYCy;;iH$qB>Zxv;5dx6o7-)iCP=LgwDxUBWQp#*HV@be*@}e2YaGqH6|sZr!EZ z8?dpt%A0S!hAd}nZLbTr>8j$&)hqOS1A-uAe}A8Dw})+*gfpM%WQNyhh;Yjibls#V zOLp(w=j3=Ktl!ImAdK-E9&wxygb|MG2q4L^>))-)-1m9#@Bv=Kt9!mFx~^kc7F%1J zsH(zjKBLp_;CUWNlCt+`kM;Expky!@U^@o7u8DHBCFZWv=^Vo}X*N5=NmdiK6K1ms zhGDQ+1VoYOg&oI1QBn-4`*kz|$g(=*a6M^R;>$VZYT zCgWMn;z-%qzCsj7gh5QR-J#iR@y&033tiK=d}Rx-p)+5c(P=l)6cbG|#9Yr2AWv0^ z!Si%F!*!Y@VGLAwjzbbBRAt4zd-td+iL#P0Op`n>1cwxf$|9%TYVgd{*J-yK^jBNtWsGi`RFcBk`4GdjSXo~u$x_lZLzfJitriFS z2kbxGV>0oHqX=12P&A!JtIOKz3Z`YyY&mtFqlByo_0N13VCWj2XA?&;i}@T?QK=}g zER)ssHNt3t@6Wk&`!4_MKm9Gv&qA(Teu8UHZ1K4-{1{I^dxIp-NwS#Hbi^BPzRvC2 zH!*aT&9!wF$$}so(OGR0M=E8~;Ip6kBES8cU*%W+%fHVr{Ni8WCqDlZT-m6c&TrBA&;mJ0mie+OA~Sw<&4ShN5D002ovPDHLkV1g*@a&Z6v diff --git a/examples/pretrained_cnn/data/puzzle.jpeg b/examples/pretrained_cnn/data/puzzle.jpeg deleted file mode 100755 index bbd3a04f0939c0afce4668c15acd9136701e71c3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 8892 
[git binary patch data omitted: deleted image examples/pretrained_cnn/data/puzzle.jpeg, 8892 bytes]
diff --git a/examples/pretrained_cnn/data/tiger.jpeg b/examples/pretrained_cnn/data/tiger.jpeg
deleted file mode 100755
index 52c82a3c7632c7f81e756303ad9caf099f9cfdc5..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 12270
[git binary patch data omitted: deleted image examples/pretrained_cnn/data/tiger.jpeg, 12270 bytes]
diff --git a/examples/pretrained_cnn/tutorial_load_ckpt_weights_to_tensorlayer.py b/examples/pretrained_cnn/tutorial_load_ckpt_weights_to_tensorlayer.py
deleted file mode 100644
index a3837f9fc..000000000
--- a/examples/pretrained_cnn/tutorial_load_ckpt_weights_to_tensorlayer.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-
-import tensorlayer as tl
-from tensorlayer.layers import (Input, Conv2d, Flatten, Dense, MaxPool2d)
-from tensorlayer.models import Model
-from tensorlayer.files import maybe_download_and_extract
-import numpy as np
-import tensorflow as tf
-
-filename = 'ckpt_parameters.zip'
-url_score = 'https://media.githubusercontent.com/media/tensorlayer/pretrained-models/master/models/'
-
-# download weights
-down_file = tl.files.maybe_download_and_extract(
-    filename=filename, working_directory='model/', url_source=url_score, extract=True
-)
-
-model_file = 'model/ckpt_parameters'
-
-# ckpt to npz; rename_key renames the checkpoint keys to match TL's naming rule
-tl.files.ckpt_to_npz_dict(model_file, rename_key=True)
-weights = np.load('model.npz', allow_pickle=True)
-
-# view the parameter names and weight shapes
-for key in weights.keys():
-    print(key, weights[key].shape)
-
-
-# build model
-def create_model(inputs_shape):
-    W_init = tl.initializers.truncated_normal(stddev=5e-2)
-    W_init2 = tl.initializers.truncated_normal(stddev=0.04)
-    ni = Input(inputs_shape)
-    nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, name='conv1_1')(ni)
-    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1_1')(nn)
-    nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv1_2')(nn)
-    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1_2')(nn)
-
-    nn = Conv2d(128, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv2_1')(nn)
-    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool2_1')(nn)
-    nn = Conv2d(128, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv2_2')(nn)
-    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool2_2')(nn)
-
-    nn = Conv2d(256, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv3_1')(nn)
-    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool3_1')(nn)
-    nn = Conv2d(256, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv3_2')(nn)
-    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool3_2')(nn)
-
-    nn = Conv2d(512, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv4_1')(nn)
-    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool4_1')(nn)
-    nn = Conv2d(512, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv4_2')(nn)
-    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool4_2')(nn)
-
-    nn = Flatten(name='flatten')(nn)
-    nn = Dense(1000, act=None, W_init=W_init2, name='output')(nn)
-
-    M = Model(inputs=ni, outputs=nn, name='cnn')
-    return M
-
-
-net = create_model([None, 224, 224, 3])
-# loaded weights whose names are not found in the network's weights will be skipped.
-# If the ckpt follows the same naming rule as TL, we can restore the model with
-# tl.files.load_and_assign_ckpt(model_dir=, network=, skip=True)
-tl.files.load_and_assign_npz_dict(network=net, skip=True)
-
-# you can use the following code to view the restored model parameters.
-net_weights_name = [w.name for w in net.all_weights]
-for i in range(len(net_weights_name)):
-    print(net_weights_name[i], net.all_weights[net_weights_name.index(net_weights_name[i])])
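A quick sanity check on the conversion in the tutorial above is to compare the converted npz keys against the network's weight names before assigning. A minimal sketch, assuming the `model.npz` written by `ckpt_to_npz_dict` and the `net` returned by `create_model`:

import numpy as np

weights = np.load('model.npz', allow_pickle=True)
npz_keys = set(weights.keys())
net_keys = set(w.name for w in net.all_weights)
print('restorable keys:', sorted(npz_keys & net_keys))
print('npz-only keys  :', sorted(npz_keys - net_keys))  # these are ignored when skip=True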
diff --git a/examples/pretrained_cnn/tutorial_models_mobilenetv1.py b/examples/pretrained_cnn/tutorial_models_mobilenetv1.py
deleted file mode 100644
index 8d7b35a6b..000000000
--- a/examples/pretrained_cnn/tutorial_models_mobilenetv1.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""
-MobileNetV1 for ImageNet using TL models
-
-- mobilenetv2 : https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet
-- tf.slim : https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models
-"""
-
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.models.imagenet_classes import class_names
-
-# tf.logging.set_verbosity(tf.logging.DEBUG)
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-# get the whole model
-mobilenetv1 = tl.models.MobileNetV1(pretrained=True)
-
-img1 = tl.vis.read_image('data/tiger.jpeg')
-img1 = tl.prepro.imresize(img1, (224, 224)) / 255
-img1 = img1.astype(np.float32)[np.newaxis, ...]
-
-start_time = time.time()
-output = mobilenetv1(img1, is_train=False)
-prob = tf.nn.softmax(output)[0].numpy()
-print(" End time : %.5ss" % (time.time() - start_time))
-preds = (np.argsort(prob)[::-1])[0:5]
-for p in preds:
-    print(class_names[p], prob[p])
diff --git a/examples/pretrained_cnn/tutorial_models_resnet50.py b/examples/pretrained_cnn/tutorial_models_resnet50.py
deleted file mode 100644
index b8f8b1c28..000000000
--- a/examples/pretrained_cnn/tutorial_models_resnet50.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""
-ResNet50 for ImageNet using TL models
-
-"""
-
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.models.imagenet_classes import class_names
-
-# tf.logging.set_verbosity(tf.logging.DEBUG)
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-# get the whole model
-resnet = tl.models.ResNet50(pretrained=True)
-
-img1 = tl.vis.read_image('data/tiger.jpeg')
-img1 = tl.prepro.imresize(img1, (224, 224))[:, :, ::-1]
-img1 = img1 - np.array([103.939, 116.779, 123.68]).reshape((1, 1, 3))
-
-img1 = img1.astype(np.float32)[np.newaxis, ...]
-
-start_time = time.time()
-output = resnet(img1, is_train=False)
-prob = tf.nn.softmax(output)[0].numpy()
-print(" End time : %.5ss" % (time.time() - start_time))
-preds = (np.argsort(prob)[::-1])[0:5]
-for p in preds:
-    print(class_names[p], prob[p])
diff --git a/examples/pretrained_cnn/tutorial_models_squeezenetv1.py b/examples/pretrained_cnn/tutorial_models_squeezenetv1.py
deleted file mode 100644
index 9b6ee4e7f..000000000
--- a/examples/pretrained_cnn/tutorial_models_squeezenetv1.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""SqueezeNet for ImageNet using TL models."""
-
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.models.imagenet_classes import class_names
-
-# tf.logging.set_verbosity(tf.logging.DEBUG)
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-# get the whole model
-squeezenet = tl.models.SqueezeNetV1(pretrained=True)
-print(squeezenet)
-
-img1 = tl.vis.read_image('data/tiger.jpeg')
-img1 = tl.prepro.imresize(img1, (224, 224)) / 255
-img1 = img1.astype(np.float32)[np.newaxis, ...]
-
-start_time = time.time()
-output = squeezenet(img1, is_train=False)
-prob = tf.nn.softmax(output)[0].numpy()
-print(" End time : %.5ss" % (time.time() - start_time))
-preds = (np.argsort(prob)[::-1])[0:5]
-for p in preds:
-    print(class_names[p], prob[p])
diff --git a/examples/pretrained_cnn/tutorial_models_vgg16.py b/examples/pretrained_cnn/tutorial_models_vgg16.py
deleted file mode 100644
index 7d224c235..000000000
--- a/examples/pretrained_cnn/tutorial_models_vgg16.py
+++ /dev/null
@@ -1,27 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""VGG-16 for ImageNet using TL models."""
-
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.models.imagenet_classes import class_names
-
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-# get the whole model
-vgg = tl.models.vgg16(pretrained=True)
-
-img = tl.vis.read_image('data/tiger.jpeg')
-img = tl.prepro.imresize(img, (224, 224)).astype(np.float32) / 255
-
-start_time = time.time()
-output = vgg(img, is_train=False)
-probs = tf.nn.softmax(output)[0].numpy()
-print(" End time : %.5ss" % (time.time() - start_time))
-preds = (np.argsort(probs)[::-1])[0:5]
-for p in preds:
-    print(class_names[p], probs[p])
diff --git a/examples/pretrained_cnn/tutorial_models_vgg19.py b/examples/pretrained_cnn/tutorial_models_vgg19.py
deleted file mode 100644
index 3f04fe9b3..000000000
--- a/examples/pretrained_cnn/tutorial_models_vgg19.py
+++ /dev/null
@@ -1,27 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""VGG-19 for ImageNet using TL models."""
-
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.models.imagenet_classes import class_names
-
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-# get the whole model
-vgg = tl.models.vgg19(pretrained=True)
-
-img = tl.vis.read_image('data/tiger.jpeg')
-img = tl.prepro.imresize(img, (224, 224)).astype(np.float32) / 255
-
-start_time = time.time()
-output = vgg(img, is_train=False)
-probs = tf.nn.softmax(output)[0].numpy()
-print(" End time : %.5ss" % (time.time() - start_time))
-preds = (np.argsort(probs)[::-1])[0:5]
-for p in preds:
-    print(class_names[p], probs[p])
diff --git a/examples/pretrained_cnn/tutorial_models_vgg_static.py b/examples/pretrained_cnn/tutorial_models_vgg_static.py
deleted file mode 100644
index e5644395f..000000000
--- a/examples/pretrained_cnn/tutorial_models_vgg_static.py
+++ /dev/null
@@ -1,27 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""VGG for ImageNet using TL models."""
-
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.models.imagenet_classes import class_names
-
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-# get the whole model
-vgg = tl.models.vgg16(pretrained=True, mode='static')
-
-img = tl.vis.read_image('data/tiger.jpeg')
-img = tl.prepro.imresize(img, (224, 224)).astype(np.float32) / 255
-
-start_time = time.time()
-output = vgg(img, is_train=False)
-probs = tf.nn.softmax(output)[0].numpy()
-print(" End time : %.5ss" % (time.time() - start_time))
-preds = (np.argsort(probs)[::-1])[0:5]
-for p in preds:
-    print(class_names[p], probs[p])
diff --git a/examples/quantized_net/README.md b/examples/quantized_net/README.md
deleted file mode 100644
index 565313040..000000000
--- a/examples/quantized_net/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-### TODO
-- All TFRecord implementations should be migrated to the Dataset API.
-
-### Blogs
-- [Google's quantized-network implementation (CVPR 2018)](https://zhuanlan.zhihu.com/p/41121544)
-- [Quantized models for neural-network acceleration (with code)](https://zhuanlan.zhihu.com/p/37220669)
\ No newline at end of file
diff --git a/examples/quantized_net/tutorial_binarynet_cifar10_tfrecord.py b/examples/quantized_net/tutorial_binarynet_cifar10_tfrecord.py
deleted file mode 100644
index 3f4d0fcf1..000000000
--- a/examples/quantized_net/tutorial_binarynet_cifar10_tfrecord.py
+++ /dev/null
@@ -1,218 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""
-
-- 1. This model has 1,068,298 parameters and uses a DoReFa-style compression strategy (weights: 1 bit, activations: 1 bit);
-after 500 epochs of training on a GPU, an accuracy of 41.1% was reached.
-
-- 2. For simplified CNN layers, see "Convolutional layer (Simplified)"
-on the Read the Docs website.
-
-- 3. For data augmentation without TFRecord, see `tutorial_image_preprocess.py`!
-
-Links
-------
-.. https://www.tensorflow.org/versions/r0.9/tutorials/deep_cnn/index.html
-.. https://github.com/tensorflow/tensorflow/tree/r0.9/tensorflow/models/image/cifar10
-
-Note
-----
-The optimizers used in the official code and in this code are different.
-
-Description
-----------
-The images are processed as follows:
-.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
-.. They are approximately whitened to make the model insensitive to dynamic range.
-
-For training, we additionally apply a series of random distortions to
-artificially increase the data set size:
-.. Randomly flip the image from left to right.
-.. Randomly distort the image brightness.
-.. Randomly distort the image contrast.
-
-Speed Up
--------
-Reading images from disk and distorting them can use a non-trivial amount
-of processing time. To prevent these operations from slowing down training,
-we run them inside 16 separate threads which continuously fill a TensorFlow queue.
-
-"""
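For readers new to BinaryNet: the 1-bit scheme described above binarizes both weights and activations with a sign() forward pass and trains through it with a straight-through gradient estimator. A minimal TF2 sketch of the idea (an illustration only, not the internals of TL's Sign/BinaryConv2d layers):

import tensorflow as tf

@tf.custom_gradient
def binarize(x):
    # Forward: quantize to {-1, +1}. Note tf.sign maps 0 to 0; real
    # implementations break that tie explicitly.
    def grad(dy):
        # Straight-through estimator: pass the gradient where |x| <= 1,
        # clip it to zero elsewhere.
        return dy * tf.cast(tf.abs(x) <= 1.0, dy.dtype)
    return tf.sign(x), grad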
-
-import multiprocessing
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.layers import (
-    BinaryConv2d, BinaryDense, Conv2d, Dense, Flatten, Input, LocalResponseNorm, MaxPool2d, Sign
-)
-from tensorlayer.models import Model
-
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-# Download data, and convert to TFRecord format, see ```tutorial_tfrecord.py```
-# prepare cifar10 data
-X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
-
-
-def binary_model(input_shape, n_classes):
-    in_net = Input(shape=input_shape, name='input')
-
-    net = Conv2d(64, (5, 5), (1, 1), act='relu', padding='SAME', name='conv1')(in_net)
-    net = Sign(name='sign1')(net)
-
-    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net)
-    net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm1')(net)
-    net = BinaryConv2d(64, (5, 5), (1, 1), act='relu', padding='SAME', name='bconv1')(net)
-
-    net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm2')(net)
-    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net)
-    net = Flatten(name='flatten')(net)
-    net = Sign(name='sign2')(net)
-    net = BinaryDense(384, act='relu', name='d1relu')(net)
-    net = Sign(name='sign3')(net)
-    net = BinaryDense(192, act='relu', name='d2relu')(net)
-    net = Dense(n_classes, act=None, name='output')(net)
-    net = Model(inputs=in_net, outputs=net, name='binarynet')
-    return net
-
-
-# training settings
-net = binary_model([None, 24, 24, 3], n_classes=10)
-batch_size = 128
-n_epoch = 50000
-learning_rate = 0.0001
-print_freq = 5
-n_step_epoch = int(len(y_train) / batch_size)
-n_step = n_epoch * n_step_epoch
-shuffle_buffer_size = 128
-
-train_weights = net.trainable_weights
-optimizer = tf.optimizers.Adam(learning_rate)
-cost = tl.cost.cross_entropy
-
-
-def generator_train():
-    inputs = X_train
-    targets = y_train
-    if len(inputs) != len(targets):
-        raise AssertionError("The length of inputs and targets should be equal")
-    for _input, _target in zip(inputs, targets):
-        # yield _input.encode('utf-8'), _target.encode('utf-8')
-        yield _input, _target
-
-
-def generator_test():
-    inputs = X_test
-    targets = y_test
-    if len(inputs) != len(targets):
-        raise AssertionError("The length of inputs and targets should be equal")
-    for _input, _target in zip(inputs, targets):
-        # yield _input.encode('utf-8'), _target.encode('utf-8')
-        yield _input, _target
-
-
-def _map_fn_train(img, target):
-    # 1. Randomly crop a [height, width] section of the image.
-    img = tf.image.random_crop(img, [24, 24, 3])
-    # 2. Randomly flip the image horizontally.
-    img = tf.image.random_flip_left_right(img)
-    # 3. Randomly change brightness.
-    img = tf.image.random_brightness(img, max_delta=63)
-    # 4. Randomly change contrast.
-    img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
-    # 5. Subtract off the mean and divide by the variance of the pixels.
-    img = tf.image.per_image_standardization(img)
-    target = tf.reshape(target, ())
-    return img, target
-
-
-def _map_fn_test(img, target):
-    # 1. Crop the central [height, width] of the image.
-    img = tf.image.resize_with_pad(img, 24, 24)
-    # 2. Subtract off the mean and divide by the variance of the pixels.
-    img = tf.image.per_image_standardization(img)
-    img = tf.reshape(img, (24, 24, 3))
-    target = tf.reshape(target, ())
-    return img, target
-
-
-def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
-    with tf.GradientTape() as tape:
-        y_pred = network(X_batch)
-        _loss = cost(y_pred, y_batch)
-    grad = tape.gradient(_loss, network.trainable_weights)
-    train_op.apply_gradients(zip(grad, network.trainable_weights))
-    if acc is not None:
-        _acc = acc(y_pred, y_batch)
-        return _loss, _acc
-    else:
-        return _loss, None
-
-
-def accuracy(_logits, y_batch):
-    return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
-
-
-# dataset API and augmentation
-train_ds = tf.data.Dataset.from_generator(
-    generator_train, output_types=(tf.float32, tf.int32)
-)  # , output_shapes=((24, 24, 3), (1)))
-train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
-# train_ds = train_ds.repeat(n_epoch)
-train_ds = train_ds.shuffle(shuffle_buffer_size)
-train_ds = train_ds.prefetch(buffer_size=4096)
-train_ds = train_ds.batch(batch_size)
-# value = train_ds.make_one_shot_iterator().get_next()
-
-test_ds = tf.data.Dataset.from_generator(
-    generator_test, output_types=(tf.float32, tf.int32)
-)  # , output_shapes=((24, 24, 3), (1)))
-# test_ds = test_ds.shuffle(shuffle_buffer_size)
-test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count())
-# test_ds = test_ds.repeat(n_epoch)
-test_ds = test_ds.prefetch(buffer_size=4096)
-test_ds = test_ds.batch(batch_size)
-# value_test = test_ds.make_one_shot_iterator().get_next()
-
-for epoch in range(n_epoch):
-    start_time = time.time()
-
-    train_loss, train_acc, n_iter = 0, 0, 0
-    for X_batch, y_batch in train_ds:
-        net.train()
-        _loss, acc = _train_step(net, X_batch, y_batch, cost=cost, train_op=optimizer, acc=accuracy)
-
-        train_loss += _loss
-        train_acc += acc
-        n_iter += 1
-
-    print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
-    print("   train loss: {}".format(train_loss / n_iter))
-    print("   train acc:  {}".format(train_acc / n_iter))
-
-    # use training and evaluation sets to evaluate the model every print_freq epoch
-    if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-        net.eval()
-        val_loss, val_acc, n_val_iter = 0, 0, 0
-        for X_batch, y_batch in test_ds:
-            _logits = net(X_batch)  # is_train=False, disable dropout
-            val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
-            val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
-            n_val_iter += 1
-        print("   val loss: {}".format(val_loss / n_val_iter))
-        print("   val acc:  {}".format(val_acc / n_val_iter))
-
-# use testing data to evaluate the model
-net.eval()
-test_loss, test_acc, n_iter = 0, 0, 0
-for X_batch, y_batch in test_ds:
-    _logits = net(X_batch)
-    test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss')
-    test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
-    n_iter += 1
-print("   test loss: {}".format(test_loss / n_iter))
-print("   test acc:  {}".format(test_acc / n_iter))
diff --git a/examples/quantized_net/tutorial_binarynet_mnist_cnn.py b/examples/quantized_net/tutorial_binarynet_mnist_cnn.py
deleted file mode 100644
index 4eccd5c2e..000000000
--- a/examples/quantized_net/tutorial_binarynet_mnist_cnn.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.layers import (BatchNorm, BinaryConv2d, BinaryDense, Flatten, Input, MaxPool2d, Sign)
-from tensorlayer.models import Model
-
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
-
-batch_size = 128
-
-
-def model(inputs_shape, n_class=10):
-    # In BNN, all the layers' inputs are binary, with the exception of the first layer.
-    # ref: https://github.com/itayhubara/BinaryNet.tf/blob/master/models/BNN_cifar10.py
-    net_in = Input(inputs_shape, name='input')
-    net = BinaryConv2d(32, (5, 5), (1, 1), padding='SAME', b_init=None, name='bcnn1')(net_in)
-    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1')(net)
-    net = BatchNorm(act=tl.act.htanh, name='bn1')(net)
-
-    net = Sign("sign1")(net)
-    net = BinaryConv2d(64, (5, 5), (1, 1), padding='SAME', b_init=None, name='bcnn2')(net)
-    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool2')(net)
-    net = BatchNorm(act=tl.act.htanh, name='bn2')(net)
-
-    net = Flatten('ft')(net)
-    net = Sign("sign2")(net)
-    net = BinaryDense(256, b_init=None, name='dense')(net)
-    net = BatchNorm(act=tl.act.htanh, name='bn3')(net)
-
-    net = Sign("sign3")(net)
-    net = BinaryDense(10, b_init=None, name='bout')(net)
-    net = BatchNorm(name='bno')(net)
-    net = Model(inputs=net_in, outputs=net, name='binarynet')
-    return net
-
-
-def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
-    with tf.GradientTape() as tape:
-        y_pred = network(X_batch)
-        _loss = cost(y_pred, y_batch)
-    grad = tape.gradient(_loss, network.trainable_weights)
-    train_op.apply_gradients(zip(grad, network.trainable_weights))
-    if acc is not None:
-        _acc = acc(y_pred, y_batch)
-        return _loss, _acc
-    else:
-        return _loss, None
-
-
-def accuracy(_logits, y_batch):
-    return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
-
-
-n_epoch = 200
-print_freq = 5
-
-net = model([None, 28, 28, 1])
-train_op = tf.optimizers.Adam(learning_rate=0.0001)
-cost = tl.cost.cross_entropy
-
-for epoch in range(n_epoch):
-    start_time = time.time()
-    train_loss, train_acc, n_batch = 0, 0, 0
-    net.train()
-
-    for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
-        _loss, acc = _train_step(net, X_train_a, y_train_a, cost=cost, train_op=train_op, acc=accuracy)
-        train_loss += _loss
-        train_acc += acc
-        n_batch += 1
-
-    # print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
-    # print("   train loss: %f" % (train_loss / n_batch))
-    # print("   train acc: %f" % (train_acc / n_batch))
-
-    if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-        print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
-        print("   train loss: %f" % (train_loss / n_batch))
-        print("   train acc: %f" % (train_acc / n_batch))
-        val_loss, val_acc, val_batch = 0, 0, 0
-        net.eval()
-        for X_val_a, y_val_a in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=True):
-            _logits = net(X_val_a)
-            val_loss += tl.cost.cross_entropy(_logits, y_val_a, name='eval_loss')
-            val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_val_a))
-            val_batch += 1
-        print("   val loss: {}".format(val_loss / val_batch))
-        print("   val acc:  {}".format(val_acc / val_batch))
-
-net.test()
-test_loss, test_acc, n_test_batch = 0, 0, 0
-for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=True):
-    _logits = net(X_test_a)
-    test_loss += tl.cost.cross_entropy(_logits, y_test_a, name='test_loss')
-    test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_test_a))
-    n_test_batch += 1
-print("   test loss: %f" % (test_loss / n_test_batch))
-print("   test acc: %f" % (test_acc / n_test_batch))
diff --git a/examples/quantized_net/tutorial_dorefanet_cifar10_tfrecord.py b/examples/quantized_net/tutorial_dorefanet_cifar10_tfrecord.py
deleted file mode 100644
index 5ebb7cfa6..000000000
--- a/examples/quantized_net/tutorial_dorefanet_cifar10_tfrecord.py
+++ /dev/null
@@ -1,211 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""
-
-- 1. This model has 1,068,298 parameters and uses the DoReFa compression strategy (weights: 1 bit, activations: 3 bits);
-after 500 epochs of training on a GPU, an accuracy of 81.1% was reached.
-
-- 2. For simplified CNN layers, see "Convolutional layer (Simplified)"
-on the Read the Docs website.
-
-- 3. For data augmentation without TFRecord, see `tutorial_image_preprocess.py`!
-
-Links
-------
-.. paper: https://arxiv.org/abs/1606.06160
-.. code: https://github.com/XJTUWYD/DoReFa_Cifar10
-
-Note
-----
-The optimizers used in the official code and in this code are different.
-
-Description
-----------
-The images are processed as follows:
-.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
-.. They are approximately whitened to make the model insensitive to dynamic range.
-
-For training, we additionally apply a series of random distortions to
-artificially increase the data set size:
-.. Randomly flip the image from left to right.
-.. Randomly distort the image brightness.
-.. Randomly distort the image contrast.
-
-Speed Up
--------
-Reading images from disk and distorting them can use a non-trivial amount
-of processing time. To prevent these operations from slowing down training,
-we run them inside 16 separate threads which continuously fill a TensorFlow queue.
-
-"""
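In the TF2 port below, those 16 input threads become tf.data parallelism. The pattern, in sketch form (`generator_train` and `_map_fn_train` are defined later in this file; the buffer sizes match the settings used below):

import multiprocessing
import tensorflow as tf

train_ds = tf.data.Dataset.from_generator(generator_train, output_types=(tf.float32, tf.int32))
# parallel map replaces the per-thread distortion pipeline
train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
# prefetch replaces the filled queue that decoupled input from training
train_ds = train_ds.shuffle(128).prefetch(buffer_size=4096).batch(128)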
-
-import multiprocessing
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.layers import (Conv2d, Dense, DorefaConv2d, DorefaDense, Flatten, Input, LocalResponseNorm, MaxPool2d)
-from tensorlayer.models import Model
-
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-# Download data, and convert to TFRecord format, see ```tutorial_tfrecord.py```
-# prepare cifar10 data
-X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
-
-
-def dorefanet_model(input_shape, n_classes):
-    in_net = Input(shape=input_shape, name='input')
-    net = Conv2d(32, (5, 5), (1, 1), act='relu', padding='SAME', name='conv1')(in_net)
-    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net)
-    net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm1')(net)
-    net = tl.layers.Sign("sign")(net)
-    net = DorefaConv2d(8, 32, 64, (5, 5), (1, 1), act='relu', padding='SAME', name='DorefaConv1')(net)
-    net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm2')(net)
-    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net)
-    net = Flatten(name='flatten')(net)
-    net = DorefaDense(8, 16, 384, act='relu', name='DorefaDense1')(net)
-    net = DorefaDense(8, 16, 192, act='relu', name='DorefaDense2')(net)
-    net = Dense(n_classes, act=None, name='output')(net)
-    net = Model(inputs=in_net, outputs=net, name='dorefanet')
-    return net
-
-
-# training settings
-net = dorefanet_model([None, 24, 24, 3], n_classes=10)
-batch_size = 128
-n_epoch = 50000
-learning_rate = 0.0001
-print_freq = 5
-n_step_epoch = int(len(y_train) / batch_size)
-n_step = n_epoch * n_step_epoch
-shuffle_buffer_size = 128
-
-optimizer = tf.optimizers.Adam(learning_rate)
-# optimizer = tf.optimizers.SGD(learning_rate)
-cost = tl.cost.cross_entropy
-
-
-def generator_train():
-    inputs = X_train
-    targets = y_train
-    if len(inputs) != len(targets):
-        raise AssertionError("The length of inputs and targets should be equal")
-    for _input, _target in zip(inputs, targets):
-        # yield _input.encode('utf-8'), _target.encode('utf-8')
-        yield _input, _target
-
-
-def generator_test():
-    inputs = X_test
-    targets = y_test
-    if len(inputs) != len(targets):
-        raise AssertionError("The length of inputs and targets should be equal")
-    for _input, _target in zip(inputs, targets):
-        # yield _input.encode('utf-8'), _target.encode('utf-8')
-        yield _input, _target
-
-
-def _map_fn_train(img, target):
-    # 1. Randomly crop a [height, width] section of the image.
-    img = tf.image.random_crop(img, [24, 24, 3])
-    # 2. Randomly flip the image horizontally.
-    img = tf.image.random_flip_left_right(img)
-    # 3. Randomly change brightness.
-    img = tf.image.random_brightness(img, max_delta=63)
-    # 4. Randomly change contrast.
-    img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
-    # 5. Subtract off the mean and divide by the variance of the pixels.
-    img = tf.image.per_image_standardization(img)
-    target = tf.reshape(target, ())
-    return img, target
-
-
-def _map_fn_test(img, target):
-    # 1. Crop the central [height, width] of the image.
-    img = tf.image.resize_with_pad(img, 24, 24)
-    # 2. Subtract off the mean and divide by the variance of the pixels.
-    img = tf.image.per_image_standardization(img)
-    img = tf.reshape(img, (24, 24, 3))
-    target = tf.reshape(target, ())
-    return img, target
-
-
-def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
-    with tf.GradientTape() as tape:
-        y_pred = network(X_batch)
-        _loss = cost(y_pred, y_batch)
-    grad = tape.gradient(_loss, network.trainable_weights)
-    train_op.apply_gradients(zip(grad, network.trainable_weights))
-    if acc is not None:
-        _acc = acc(y_pred, y_batch)
-        return _loss, _acc
-    else:
-        return _loss, None
-
-
-def accuracy(_logits, y_batch):
-    return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
-
-
-# dataset API and augmentation
-train_ds = tf.data.Dataset.from_generator(
-    generator_train, output_types=(tf.float32, tf.int32)
-)  # , output_shapes=((24, 24, 3), (1)))
-train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
-# train_ds = train_ds.repeat(n_epoch)
-train_ds = train_ds.shuffle(shuffle_buffer_size)
-train_ds = train_ds.prefetch(buffer_size=4096)
-train_ds = train_ds.batch(batch_size)
-# value = train_ds.make_one_shot_iterator().get_next()
-
-test_ds = tf.data.Dataset.from_generator(
-    generator_test, output_types=(tf.float32, tf.int32)
-)  # , output_shapes=((24, 24, 3), (1)))
-# test_ds = test_ds.shuffle(shuffle_buffer_size)
-test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count())
-# test_ds = test_ds.repeat(n_epoch)
-test_ds = test_ds.prefetch(buffer_size=4096)
-test_ds = test_ds.batch(batch_size)
-# value_test = test_ds.make_one_shot_iterator().get_next()
-
-for epoch in range(n_epoch):
-    start_time = time.time()
-
-    train_loss, train_acc, n_iter = 0, 0, 0
-    net.train()
-    for X_batch, y_batch in train_ds:
-        _loss, acc = _train_step(net, X_batch, y_batch, cost=cost, train_op=optimizer, acc=accuracy)
-
-        train_loss += _loss
-        train_acc += acc
-        n_iter += 1
-
-    # use training and evaluation sets to evaluate the model every print_freq epoch
-    if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-        print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
-        print("   train loss: {}".format(train_loss / n_iter))
-        print("   train acc:  {}".format(train_acc / n_iter))
-
-        net.eval()
-        val_loss, val_acc, n_val_iter = 0, 0, 0
-        for X_batch, y_batch in test_ds:
-            _logits = net(X_batch)  # is_train=False, disable dropout
-            val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
-            val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
-            n_val_iter += 1
-        print("   val loss: {}".format(val_loss / n_val_iter))
-        print("   val acc:  {}".format(val_acc / n_val_iter))
-
-# use testing data to evaluate the model
-net.eval()
-test_loss, test_acc, n_iter = 0, 0, 0
-for X_batch, y_batch in test_ds:
-    _logits = net(X_batch)
-    test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss')
-    test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
-    n_iter += 1
-print("   test loss: {}".format(test_loss / n_iter))
-print("   test acc:  {}".format(test_acc / n_iter))
diff --git a/examples/quantized_net/tutorial_dorefanet_mnist_cnn.py b/examples/quantized_net/tutorial_dorefanet_mnist_cnn.py
deleted file mode 100644
index 1cfd68124..000000000
--- a/examples/quantized_net/tutorial_dorefanet_mnist_cnn.py
+++ /dev/null
@@ -1,101 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.layers import (BatchNorm, Dense, DorefaConv2d, DorefaDense, Flatten, Input, MaxPool2d)
-from tensorlayer.models import Model
-
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
-
-batch_size = 128
-
-
-def model(inputs_shape, n_class=10):
-    in_net = Input(inputs_shape, name='input')
-    net = DorefaConv2d(1, 3, 32, (5, 5), (1, 1), padding='SAME', b_init=None, name='bcnn1')(in_net)
-    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1')(net)
-    net = BatchNorm(act=tl.act.htanh, name='bn1')(net)
-
-    net = DorefaConv2d(1, 3, 64, (5, 5), (1, 1), padding='SAME', b_init=None, name='bcnn2')(net)
-    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool2')(net)
-    net = BatchNorm(act=tl.act.htanh, name='bn2')(net)
-
-    net = Flatten('flatten')(net)
-    net = DorefaDense(1, 3, 256, b_init=None, name='dense')(net)
-    net = BatchNorm(act=tl.act.htanh, name='bn3')(net)
-
-    net = Dense(n_class, b_init=None, name='bout')(net)
-    net = BatchNorm(name='bno')(net)
-    net = Model(inputs=in_net, outputs=net, name='dorefanet')
-    return net
-
-
-def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
-    with tf.GradientTape() as tape:
-        y_pred = network(X_batch)
-        _loss = cost(y_pred, y_batch)
-    grad = tape.gradient(_loss, network.trainable_weights)
-    train_op.apply_gradients(zip(grad, network.trainable_weights))
-    if acc is not None:
-        _acc = acc(y_pred, y_batch)
-        return _loss, _acc
-    else:
-        return _loss, None
-
-
-def accuracy(_logits, y_batch):
-    return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
-
-
-n_epoch = 200
-print_freq = 5
-
-net = model([None, 28, 28, 1])
-train_op = tf.optimizers.Adam(learning_rate=0.0001)
-cost = tl.cost.cross_entropy
-
-for epoch in range(n_epoch):
-    start_time = time.time()
-    train_loss, train_acc, n_batch = 0, 0, 0
-    net.train()
-
-    for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
-        _loss, acc = _train_step(net, X_train_a, y_train_a, cost=cost, train_op=train_op, acc=accuracy)
-        train_loss += _loss
-        train_acc += acc
-        n_batch += 1
-
-    # print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
-    # print("   train loss: %f" % (train_loss / n_batch))
-    # print("   train acc: %f" % (train_acc / n_batch))
-
-    if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-        print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
-        print("   train loss: %f" % (train_loss / n_batch))
-        print("   train acc: %f" % (train_acc / n_batch))
-        val_loss, val_acc, val_batch = 0, 0, 0
-        net.eval()
-        for X_val_a, y_val_a in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=True):
-            _logits = net(X_val_a)
-            val_loss += tl.cost.cross_entropy(_logits, y_val_a, name='eval_loss')
-            val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_val_a))
-            val_batch += 1
-        print("   val loss: {}".format(val_loss / val_batch))
-        print("   val acc:  {}".format(val_acc / val_batch))
-
-net.test()
-test_loss, test_acc, n_test_batch = 0, 0, 0
-for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=True):
-    _logits = net(X_test_a)
-    test_loss += tl.cost.cross_entropy(_logits, y_test_a, name='test_loss')
-    test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_test_a))
-    n_test_batch += 1
-print("   test loss: %f" % (test_loss / n_test_batch))
-print("   test acc: %f" % (test_acc / n_test_batch))
diff --git a/examples/quantized_net/tutorial_quanconv_cifar10.py b/examples/quantized_net/tutorial_quanconv_cifar10.py
deleted file mode 100644
index 9b649e6f0..000000000
--- a/examples/quantized_net/tutorial_quanconv_cifar10.py
+++ /dev/null
@@ -1,208 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""
-
-- 1. This model has 1,068,298 parameters and uses a quantization compression strategy (weights: 8 bits, activations: 8 bits here; you can change the setting);
-after 705 epochs of training on a GPU, a test accuracy of 84.0% was reached.
-
-- 2. For simplified CNN layers, see "Convolutional layer (Simplified)"
-on the Read the Docs website.
-
-- 3. For data augmentation without TFRecord, see `tutorial_image_preprocess.py`!
-
-Links
-------
-.. paper: https://arxiv.org/abs/1712.05877
-
-
-Note
-----
-The optimizers used in the official code and in this code are different.
-
-Description
-----------
-The images are processed as follows:
-.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
-.. They are approximately whitened to make the model insensitive to dynamic range.
-
-For training, we additionally apply a series of random distortions to
-artificially increase the data set size:
-.. Randomly flip the image from left to right.
-.. Randomly distort the image brightness.
-.. Randomly distort the image contrast.
-
-Speed Up
--------
-Reading images from disk and distorting them can use a non-trivial amount
-of processing time. To prevent these operations from slowing down training,
-we run them inside 16 separate threads which continuously fill a TensorFlow queue.
-
-"""
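The k-bit quantization mentioned above is commonly implemented as uniform "fake quantization": the forward pass rounds values in [0, 1] to 2^k - 1 levels, while the backward pass treats rounding as identity. A minimal sketch of that idea for k = 8 (an illustration only, not the exact QuanConv2dWithBN internals):

import tensorflow as tf

@tf.custom_gradient
def quantize_8bit(x):
    # Uniform fake quantization of x in [0, 1] onto 2**8 - 1 levels.
    n = 255.0
    y = tf.round(x * n) / n
    def grad(dy):
        return dy  # straight-through: rounding is treated as identity
    return y, grad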
-import multiprocessing
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.layers import (Dense, Flatten, Input, MaxPool2d, QuanConv2dWithBN, QuanDense)
-from tensorlayer.models import Model
-
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-# Download data, and convert to TFRecord format, see ```tutorial_tfrecord.py```
-# prepare cifar10 data
-X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
-
-
-def model(input_shape, n_classes, bitW, bitA):
-    in_net = Input(shape=input_shape, name='input')
-    net = QuanConv2dWithBN(64, (5, 5), (1, 1), act='relu', padding='SAME', bitW=bitW, bitA=bitA, name='qcnnbn1')(in_net)
-    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net)
-    net = QuanConv2dWithBN(64, (5, 5), (1, 1), padding='SAME', act='relu', bitW=bitW, bitA=bitA, name='qcnnbn2')(net)
-    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net)
-    net = Flatten(name='flatten')(net)
-    net = QuanDense(384, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd1relu')(net)
-    net = QuanDense(192, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd2relu')(net)
-    net = Dense(n_classes, act=None, name='output')(net)
-    net = Model(inputs=in_net, outputs=net, name='quannet')
-    return net
-
-
-# training settings
-bitW = 8
-bitA = 8
-net = model([None, 24, 24, 3], n_classes=10, bitW=bitW, bitA=bitA)
-batch_size = 128
-n_epoch = 50000
-learning_rate = 0.0001
-print_freq = 5
-n_step_epoch = int(len(y_train) / batch_size)
-n_step = n_epoch * n_step_epoch
-shuffle_buffer_size = 128
-
-optimizer = tf.optimizers.Adam(learning_rate)
-cost = tl.cost.cross_entropy
-
-
-def generator_train():
-    inputs = X_train
-    targets = y_train
-    if len(inputs) != len(targets):
-        raise AssertionError("The length of inputs and targets should be equal")
-    for _input, _target in zip(inputs, targets):
-        # yield _input.encode('utf-8'), _target.encode('utf-8')
-        yield _input, _target
-
-
-def generator_test():
-    inputs = X_test
-    targets = y_test
-    if len(inputs) != len(targets):
-        raise AssertionError("The length of inputs and targets should be equal")
-    for _input, _target in zip(inputs, targets):
-        # yield _input.encode('utf-8'), _target.encode('utf-8')
-        yield _input, _target
-
-
-def _map_fn_train(img, target):
-    # 1. Randomly crop a [height, width] section of the image.
-    img = tf.image.random_crop(img, [24, 24, 3])
-    # 2. Randomly flip the image horizontally.
-    img = tf.image.random_flip_left_right(img)
-    # 3. Randomly change brightness.
-    img = tf.image.random_brightness(img, max_delta=63)
-    # 4. Randomly change contrast.
-    img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
-    # 5. Subtract off the mean and divide by the variance of the pixels.
-    img = tf.image.per_image_standardization(img)
-    target = tf.reshape(target, ())
-    return img, target
-
-
-def _map_fn_test(img, target):
-    # 1. Crop the central [height, width] of the image.
-    img = tf.image.resize_with_pad(img, 24, 24)
-    # 2. Subtract off the mean and divide by the variance of the pixels.
-    img = tf.image.per_image_standardization(img)
-    img = tf.reshape(img, (24, 24, 3))
-    target = tf.reshape(target, ())
-    return img, target
-
-
-def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
-    with tf.GradientTape() as tape:
-        y_pred = network(X_batch)
-        _loss = cost(y_pred, y_batch)
-    grad = tape.gradient(_loss, network.trainable_weights)
-    train_op.apply_gradients(zip(grad, network.trainable_weights))
-    if acc is not None:
-        _acc = acc(y_pred, y_batch)
-        return _loss, _acc
-    else:
-        return _loss, None
-
-
-def accuracy(_logits, y_batch):
-    return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
-
-
-# dataset API and augmentation
-train_ds = tf.data.Dataset.from_generator(
-    generator_train, output_types=(tf.float32, tf.int32)
-)  # , output_shapes=((24, 24, 3), (1)))
-train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
-# train_ds = train_ds.repeat(n_epoch)
-train_ds = train_ds.shuffle(shuffle_buffer_size)
-train_ds = train_ds.prefetch(buffer_size=4096)
-train_ds = train_ds.batch(batch_size)
-# value = train_ds.make_one_shot_iterator().get_next()
-
-test_ds = tf.data.Dataset.from_generator(
-    generator_test, output_types=(tf.float32, tf.int32)
-)  # , output_shapes=((24, 24, 3), (1)))
-# test_ds = test_ds.shuffle(shuffle_buffer_size)
-test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count())
-# test_ds = test_ds.repeat(n_epoch)
-test_ds = test_ds.prefetch(buffer_size=4096)
-test_ds = test_ds.batch(batch_size)
-# value_test = test_ds.make_one_shot_iterator().get_next()
-
-for epoch in range(n_epoch):
-    start_time = time.time()
-
-    train_loss, train_acc, n_iter = 0, 0, 0
-    net.train()
-    for X_batch, y_batch in train_ds:
-        _loss, acc = _train_step(net, X_batch, y_batch, cost=cost, train_op=optimizer, acc=accuracy)
-
-        train_loss += _loss
-        train_acc += acc
-        n_iter += 1
-
-    # use training and evaluation sets to evaluate the model every print_freq epoch
-    if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-        print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
-        print("   train loss: {}".format(train_loss / n_iter))
-        print("   train acc:  {}".format(train_acc / n_iter))
-
-        net.eval()
-        val_loss, val_acc, n_val_iter = 0, 0, 0
X_batch, y_batch in test_ds: - _logits = net(X_batch) # is_train=False, disable dropout - val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') - val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_val_iter += 1 - print(" val loss: {}".format(val_loss / n_val_iter)) - print(" val acc: {}".format(val_acc / n_val_iter)) - -# use testing data to evaluate the model -net.eval() -test_loss, test_acc, n_iter = 0, 0, 0 -for X_batch, y_batch in test_ds: - _logits = net(X_batch) - test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') - test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_iter += 1 -print(" test loss: {}".format(test_loss / n_iter)) -print(" test acc: {}".format(test_acc / n_iter)) diff --git a/examples/quantized_net/tutorial_quanconv_mnist.py b/examples/quantized_net/tutorial_quanconv_mnist.py deleted file mode 100644 index 1dbfe8d4d..000000000 --- a/examples/quantized_net/tutorial_quanconv_mnist.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import time - -import numpy as np -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer.layers import ( - Dense, Dropout, Flatten, Input, MaxPool2d, QuanConv2d, QuanConv2dWithBN, QuanDense, QuanDenseLayerWithBN -) -from tensorlayer.models import Model - -tl.logging.set_verbosity(tl.logging.DEBUG) - -X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) -# X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False) - -batch_size = 128 - - -def model(inputs_shape, n_class=10): - net_in = Input(inputs_shape, name="input") - - net = QuanConv2dWithBN( - n_filter=32, filter_size=(5, 5), strides=(1, 1), padding='SAME', act=tl.nn.relu, name='qconvbn1' - )(net_in) - net = MaxPool2d(filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')(net) - - net = QuanConv2dWithBN( - n_filter=64, filter_size=(5, 5), strides=(1, 1), padding='SAME', act=tl.nn.relu, name='qconvbn2' - )(net) - net = MaxPool2d(filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')(net) - - net = Flatten(name='ft')(net) - - # net = QuanDense(256, act="relu", name='qdbn')(net) - # net = QuanDense(n_class, name='qdbn_out')(net) - - net = QuanDenseLayerWithBN(256, act="relu", name='qdbn')(net) - net = QuanDenseLayerWithBN(n_class, name='qdbn_out')(net) - - # net = Dense(256, act='relu', name='Dense1')(net) - # net = Dense(n_class, name='Dense2')(net) - - net = Model(inputs=net_in, outputs=net, name='quan') - return net - - -def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None): - with tf.GradientTape() as tape: - y_pred = network(X_batch) - _loss = cost(y_pred, y_batch) - grad = tape.gradient(_loss, network.trainable_weights) - train_op.apply_gradients(zip(grad, network.trainable_weights)) - if acc is not None: - _acc = acc(y_pred, y_batch) - return _loss, _acc - else: - return _loss, None - - -def accuracy(_logits, y_batch): - return np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - - -n_epoch = 200 -print_freq = 1 - -# print(sess.run(net_test.all_params)) # print real values of parameters -net = model([None, 28, 28, 1]) -train_op = tf.optimizers.Adam(learning_rate=0.0001) -cost = tl.cost.cross_entropy - -for epoch in range(n_epoch): - start_time = time.time() - train_loss, train_acc, n_iter = 0, 0, 0 - - for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True): - net.train() - _loss, acc = 
_train_step(net, X_train_a, y_train_a, cost=cost, train_op=train_op, acc=accuracy)
-
-        train_loss += _loss
-        train_acc += acc
-        n_iter += 1
-
-    if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-
-        print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
-        print("   train loss: {}".format(train_loss / n_iter))
-        print("   train acc: {}".format(train_acc / n_iter))
-
-        net.eval()
-        val_loss, val_acc, n_eval = 0, 0, 0
-        for X_val_a, y_val_a in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=True):
-            _logits = net(X_val_a)  # eval mode: disable dropout, use fixed BatchNorm statistics
-            val_loss += tl.cost.cross_entropy(_logits, y_val_a, name='eval_loss')
-            val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_val_a))
-            n_eval += 1
-        print("   val loss: {}".format(val_loss / n_eval))
-        print("   val acc: {}".format(val_acc / n_eval))
-
-net.eval()
-test_loss, test_acc, n_test_batch = 0, 0, 0
-for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=True):
-    _logits = net(X_test_a)
-    test_loss += tl.cost.cross_entropy(_logits, y_test_a, name='test_loss')
-    test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_test_a))
-    n_test_batch += 1
-print(" test loss: %f" % (test_loss / n_test_batch))
-print(" test acc: %f" % (test_acc / n_test_batch))
diff --git a/examples/quantized_net/tutorial_ternaryweight_cifar10_tfrecord.py b/examples/quantized_net/tutorial_ternaryweight_cifar10_tfrecord.py
deleted file mode 100644
index c78686011..000000000
--- a/examples/quantized_net/tutorial_ternaryweight_cifar10_tfrecord.py
+++ /dev/null
@@ -1,221 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""
-
-- 1. This model has 1,068,298 parameters and uses the TWN compression strategy (weights: 1, 0, -1; outputs: float32);
-after 500 epochs of training on a GPU, an accuracy of 80.6% was reached.
-
-- 2. For simplified CNN layers see "Convolutional layer (Simplified)"
-on the Read the Docs website.
-
-- 3. For data augmentation without TFRecord, see `tutorial_image_preprocess.py`.
-
-Links
--------
-.. https://arxiv.org/abs/1605.04711
-.. https://github.com/XJTUWYD/TWN
-
-Note
-------
-The optimizer used here differs from the one in the official code.
-
-Description
------------
-The images are processed as follows:
-.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
-.. They are approximately whitened to make the model insensitive to dynamic range.
-
-For training, we additionally apply a series of random distortions to
-artificially increase the data set size:
-.. Randomly flip the image from left to right.
-.. Randomly distort the image brightness.
-.. Randomly distort the image contrast.
-
-Speed Up
---------
-Reading images from disk and distorting them can use a non-trivial amount
-of processing time. To prevent these operations from slowing down training,
-we run them inside 16 separate threads which continuously fill a TensorFlow queue.
- -""" -import multiprocessing -import time - -import numpy as np -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer.layers import ( - Conv2d, Dense, Flatten, Input, LocalResponseNorm, MaxPool2d, TernaryConv2d, TernaryDense -) -from tensorlayer.models import Model - -tl.logging.set_verbosity(tl.logging.DEBUG) - -# Download data, and convert to TFRecord format, see ```tutorial_tfrecord.py``` -# prepare cifar10 data -X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) - - -def model(input_shape, n_classes): - in_net = Input(shape=input_shape, name='input') - - net = Conv2d(64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn1')(in_net) - net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net) - net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm1')(net) - - net = TernaryConv2d(64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn2')(net) - net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm2')(net) - net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net) - - net = Flatten(name='flatten')(net) - - net = TernaryDense(384, act=tf.nn.relu, name='d1relu')(net) - net = TernaryDense(192, act=tf.nn.relu, name='d2relu')(net) - net = Dense(n_classes, act=None, name='output')(net) - - net = Model(inputs=in_net, outputs=net, name='dorefanet') - return net - - -# training settings -bitW = 8 -bitA = 8 -net = model([None, 24, 24, 3], n_classes=10) -batch_size = 128 -n_epoch = 50000 -learning_rate = 0.0001 -print_freq = 5 -n_step_epoch = int(len(y_train) / batch_size) -n_step = n_epoch * n_step_epoch -shuffle_buffer_size = 128 - -optimizer = tf.optimizers.Adam(learning_rate) -cost = tl.cost.cross_entropy - - -def generator_train(): - inputs = X_train - targets = y_train - if len(inputs) != len(targets): - raise AssertionError("The length of inputs and targets should be equal") - for _input, _target in zip(inputs, targets): - # yield _input.encode('utf-8'), _target.encode('utf-8') - yield _input, _target - - -def generator_test(): - inputs = X_test - targets = y_test - if len(inputs) != len(targets): - raise AssertionError("The length of inputs and targets should be equal") - for _input, _target in zip(inputs, targets): - # yield _input.encode('utf-8'), _target.encode('utf-8') - yield _input, _target - - -def _map_fn_train(img, target): - # 1. Randomly crop a [height, width] section of the image. - img = tf.image.random_crop(img, [24, 24, 3]) - # 2. Randomly flip the image horizontally. - img = tf.image.random_flip_left_right(img) - # 3. Randomly change brightness. - img = tf.image.random_brightness(img, max_delta=63) - # 4. Randomly change contrast. - img = tf.image.random_contrast(img, lower=0.2, upper=1.8) - # 5. Subtract off the mean and divide by the variance of the pixels. - img = tf.image.per_image_standardization(img) - target = tf.reshape(target, ()) - return img, target - - -def _map_fn_test(img, target): - # 1. Crop the central [height, width] of the image. - img = tf.image.resize_with_pad(img, 24, 24) - # 2. Subtract off the mean and divide by the variance of the pixels. 
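-    #    (tf.image.per_image_standardization in fact divides by the adjusted
-    #    standard deviation, which guards against division by zero)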
- img = tf.image.per_image_standardization(img) - img = tf.reshape(img, (24, 24, 3)) - target = tf.reshape(target, ()) - return img, target - - -def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None): - with tf.GradientTape() as tape: - y_pred = network(X_batch) - _loss = cost(y_pred, y_batch) - grad = tape.gradient(_loss, network.trainable_weights) - train_op.apply_gradients(zip(grad, network.trainable_weights)) - if acc is not None: - _acc = acc(y_pred, y_batch) - return _loss, _acc - else: - return _loss, None - - -def accuracy(_logits, y_batch): - return np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - - -# dataset API and augmentation -train_ds = tf.data.Dataset.from_generator( - generator_train, output_types=(tf.float32, tf.int32) -) # , output_shapes=((24, 24, 3), (1))) -train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count()) -# train_ds = train_ds.repeat(n_epoch) -train_ds = train_ds.shuffle(shuffle_buffer_size) -train_ds = train_ds.prefetch(buffer_size=4096) -train_ds = train_ds.batch(batch_size) -# value = train_ds.make_one_shot_iterator().get_next() - -test_ds = tf.data.Dataset.from_generator( - generator_test, output_types=(tf.float32, tf.int32) -) # , output_shapes=((24, 24, 3), (1))) -# test_ds = test_ds.shuffle(shuffle_buffer_size) -test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count()) -# test_ds = test_ds.repeat(n_epoch) -test_ds = test_ds.prefetch(buffer_size=4096) -test_ds = test_ds.batch(batch_size) -# value_test = test_ds.make_one_shot_iterator().get_next() - -for epoch in range(n_epoch): - start_time = time.time() - - train_loss, train_acc, n_iter = 0, 0, 0 - net.train() - for X_batch, y_batch in train_ds: - _loss, acc = _train_step(net, X_batch, y_batch, cost=cost, train_op=optimizer, acc=accuracy) - - train_loss += _loss - train_acc += acc - n_iter += 1 - - print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) - print(" train loss: {}".format(train_loss / n_iter)) - print(" train acc: {}".format(train_acc / n_iter)) - - # use training and evaluation sets to evaluate the model every print_freq epoch - if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) - print(" train loss: {}".format(train_loss / n_iter)) - print(" train acc: {}".format(train_acc / n_iter)) - - net.eval() - val_loss, val_acc, n_val_iter = 0, 0, 0 - for X_batch, y_batch in test_ds: - _logits = net(X_batch) # is_train=False, disable dropout - val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') - val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_val_iter += 1 - print(" val loss: {}".format(val_loss / n_val_iter)) - print(" val acc: {}".format(val_acc / n_val_iter)) - -# use testing data to evaluate the model -net.eval() -test_loss, test_acc, n_iter = 0, 0, 0 -for X_batch, y_batch in test_ds: - _logits = net(X_batch) - test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') - test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_iter += 1 -print(" test loss: {}".format(test_loss / n_iter)) -print(" test acc: {}".format(test_acc / n_iter)) diff --git a/examples/quantized_net/tutorial_ternaryweight_mnist_cnn.py b/examples/quantized_net/tutorial_ternaryweight_mnist_cnn.py deleted file mode 100644 index a708d1f0e..000000000 --- a/examples/quantized_net/tutorial_ternaryweight_mnist_cnn.py +++ /dev/null @@ -1,102 +0,0 
@@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.layers import (BatchNorm, Dense, Flatten, Input, MaxPool2d, TernaryConv2d, TernaryDense)
-from tensorlayer.models import Model
-
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
-
-batch_size = 128
-
-
-def model(inputs_shape, n_class=10):
-    in_net = Input(inputs_shape, name='input')
-    net = TernaryConv2d(32, (5, 5), (1, 1), padding='SAME', b_init=None, name='bcnn1')(in_net)
-    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1')(net)
-    net = BatchNorm(act=tl.act.htanh, name='bn1')(net)
-
-    net = TernaryConv2d(64, (5, 5), (1, 1), padding='SAME', b_init=None, name='bcnn2')(net)
-    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool2')(net)
-    net = BatchNorm(act=tl.act.htanh, name='bn2')(net)
-
-    net = Flatten(name='flatten')(net)
-    net = Dense(256, b_init=None, name='dense')(net)
-    net = BatchNorm(act=tl.act.htanh, name='bn3')(net)
-
-    net = TernaryDense(n_class, b_init=None, name='bout')(net)
-    net = BatchNorm(name='bno')(net)
-
-    net = Model(inputs=in_net, outputs=net, name='ternarynet')
-    return net
-
-
-def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
-    with tf.GradientTape() as tape:
-        y_pred = network(X_batch)
-        _loss = cost(y_pred, y_batch)
-    grad = tape.gradient(_loss, network.trainable_weights)
-    train_op.apply_gradients(zip(grad, network.trainable_weights))
-    if acc is not None:
-        _acc = acc(y_pred, y_batch)
-        return _loss, _acc
-    else:
-        return _loss, None
-
-
-def accuracy(_logits, y_batch):
-    return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
-
-
-n_epoch = 200
-print_freq = 5
-
-net = model([None, 28, 28, 1])
-train_op = tf.optimizers.Adam(learning_rate=0.0001)
-cost = tl.cost.cross_entropy
-
-for epoch in range(n_epoch):
-    start_time = time.time()
-    train_loss, train_acc, n_batch = 0, 0, 0
-    net.train()
-
-    for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
-        _loss, acc = _train_step(net, X_train_a, y_train_a, cost=cost, train_op=train_op, acc=accuracy)
-        train_loss += _loss
-        train_acc += acc
-        n_batch += 1
-
-    if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-        print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
-        print(" train loss: %f" % (train_loss / n_batch))
-        print(" train acc: %f" % (train_acc / n_batch))
-        val_loss, val_acc, val_batch = 0, 0, 0
-        net.eval()
-        for X_val_a, y_val_a in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=True):
-            _logits = net(X_val_a)
-            val_loss += tl.cost.cross_entropy(_logits, y_val_a, name='eval_loss')
-            val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_val_a))
-            val_batch += 1
-        print(" val loss: {}".format(val_loss / val_batch))
-        print(" val acc: {}".format(val_acc / val_batch))
-
-net.eval()
-test_loss, test_acc, n_test_batch = 0, 0, 0
-for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=True):
-    _logits = net(X_test_a)
-    test_loss += tl.cost.cross_entropy(_logits, y_test_a, name='test_loss')
-    test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_test_a))
-    n_test_batch += 1
-print(" test loss: %f" % 
(test_loss / n_test_batch)) -print(" test acc: %f" % (test_acc / n_test_batch)) diff --git a/examples/reinforcement_learning/.gitignore b/examples/reinforcement_learning/.gitignore deleted file mode 100644 index 92fdef002..000000000 --- a/examples/reinforcement_learning/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -model/ -image/ diff --git a/examples/reinforcement_learning/README.md b/examples/reinforcement_learning/README.md deleted file mode 100644 index 633009bbf..000000000 --- a/examples/reinforcement_learning/README.md +++ /dev/null @@ -1,364 +0,0 @@ -# Comprehensive Reinforcement Learning Tutorial - -![GitHub last commit (branch)](https://img.shields.io/github/last-commit/tensorlayer/tensorlayer/master.svg) -[![Supported TF Version](https://img.shields.io/badge/TensorFlow-2.0.0%2B-brightgreen.svg)](https://github.com/tensorflow/tensorflow/releases) -[![Documentation Status](https://readthedocs.org/projects/tensorlayer/badge/)](https://tensorlayer.readthedocs.io/) -[![Build Status](https://travis-ci.org/tensorlayer/tensorlayer.svg?branch=master)](https://travis-ci.org/tensorlayer/tensorlayer) -[![Downloads](http://pepy.tech/badge/tensorlayer)](http://pepy.tech/project/tensorlayer) - -
-This repository contains implementations of the most popular reinforcement learning algorithms, powered by [Tensorflow 2.0](https://www.tensorflow.org/alpha/guide/effective_tf2) and Tensorlayer 2.0. We aim to make these reinforcement learning tutorials simple, transparent and straightforward, as this not only benefits new learners of reinforcement learning, but also makes it convenient for senior researchers to test their new ideas quickly.
-
-A corresponding [Springer textbook](https://deepreinforcementlearningbook.org) is also provided; you can get the free PDF if your institute has a Springer license. We also released [RLzoo](https://github.com/tensorlayer/RLzoo) for simple usage.
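-
-Every tutorial shares the same command-line entry point (see "To Use" below). Here is a minimal sketch of that pattern: the `argparse` lines appear verbatim in the tutorials, while `train()` and `test()` are hypothetical stand-ins for each script's training and evaluation loops.
-
-```python
-import argparse
-
-parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
-parser.add_argument('--train', dest='train', action='store_true', default=False)
-parser.add_argument('--test', dest='test', action='store_true', default=True)
-args = parser.parse_args()
-
-if args.train:
-    train()  # hypothetical: run the algorithm's training loop, then save weights
-if args.test:
-    test()  # hypothetical: load the saved weights and run evaluation episodes
-```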
-
-## Prerequisites:
-
-* python 3.5
-* tensorflow >= 2.0.0 or tensorflow-gpu >= 2.0.0a0
-* tensorlayer >= 2.0.1
-* tensorflow-probability
-
-If you meet the error `AttributeError: module 'tensorflow' has no attribute 'contrib'` when running the code after installing tensorflow-probability, try:
-
-`pip install --upgrade tf-nightly-2.0-preview tfp-nightly`
-
-## Status: Beta
-
-We are currently open to any suggestions or pull requests that make this reinforcement learning tutorial with TensorLayer 2.0 a better code repository for both new learners and senior researchers. Some of the algorithms mentioned in this markdown may not be available yet, since we are still implementing more RL algorithms and optimizing their performance. The algorithms listed in the table below will come out in a few weeks, and the repository will keep updating with more advanced RL algorithms in the future.
-
-## To Use:
-
-For each tutorial, open a terminal and run:
-
- `python ***.py --train` for training and `python ***.py --test` for testing.
-
-The tutorial algorithms follow the same basic structure, as shown in the file [`./tutorial_format.py`](https://github.com/tensorlayer/tensorlayer/blob/reinforcement-learning/examples/reinforcement_learning/tutorial_format.py) and in the entry-point sketch above.
-
-The pretrained models and learning curves for each algorithm are stored [here](https://github.com/tensorlayer/pretrained-models). You can download the models and load the weights in the policies for tests.
-
-## Table of Contents:
-| Algorithms | Action Space | Tutorial Env | Papers |
-| --------------- | ------------ | -------------- | -------|
-|**value-based**||||
-| Q-learning | Discrete | FrozenLake | [Technical note: Q-learning. Watkins et al. 1992](http://www.gatsby.ucl.ac.uk/~dayan/papers/cjch.pdf)|
-| Deep Q-Network (DQN)| Discrete | FrozenLake | [Human-level control through deep reinforcement learning. Mnih et al. 2015.](https://www.nature.com/articles/nature14236/) |
-| Prioritized Experience Replay | Discrete | Pong, CartPole | [Prioritized experience replay. Schaul et al. 2015.](https://arxiv.org/abs/1511.05952) |
-|Dueling DQN|Discrete | Pong, CartPole |[Dueling network architectures for deep reinforcement learning. Wang et al. 2015.](https://arxiv.org/abs/1511.06581)|
-|Double DQN| Discrete | Pong, CartPole |[Deep reinforcement learning with double q-learning. Van et al. 2016.](https://arxiv.org/abs/1509.06461)|
-|Noisy DQN|Discrete | Pong, CartPole |[Noisy networks for exploration. Fortunato et al. 2017.](https://arxiv.org/pdf/1706.10295.pdf)|
-| Distributional DQN (C51)| Discrete | Pong, CartPole | [A distributional perspective on reinforcement learning. Bellemare et al. 2017.](https://arxiv.org/pdf/1707.06887.pdf) |
-|**policy-based**||||
-|REINFORCE (PG) |Discrete/Continuous|CartPole | [Reinforcement learning: An introduction. Sutton et al. 2011.](https://www.cambridge.org/core/journals/robotica/article/robot-learning-edited-by-jonathan-h-connell-and-sridhar-mahadevan-kluwer-boston-19931997-xii240-pp-isbn-0792393651-hardback-21800-guilders-12000-8995/737FD21CA908246DF17779E9C20B6DF6)|
-| Trust Region Policy Optimization (TRPO)| Discrete/Continuous | Pendulum | [Trust region policy optimization. Schulman et al. 2015.](https://arxiv.org/pdf/1502.05477.pdf) |
-| Proximal Policy Optimization (PPO) |Discrete/Continuous |Pendulum| [Proximal policy optimization algorithms. Schulman et al. 
2017.](https://arxiv.org/abs/1707.06347) |
-|Distributed Proximal Policy Optimization (DPPO)|Discrete/Continuous |Pendulum|[Emergence of locomotion behaviours in rich environments. Heess et al. 2017.](https://arxiv.org/abs/1707.02286)|
-|**actor-critic**||||
-|Actor-Critic (AC)|Discrete/Continuous|CartPole| [Actor-critic algorithms. Konda et al. 2000.](https://papers.nips.cc/paper/1786-actor-critic-algorithms.pdf)|
-| Asynchronous Advantage Actor-Critic (A3C)| Discrete/Continuous | BipedalWalker| [Asynchronous methods for deep reinforcement learning. Mnih et al. 2016.](https://arxiv.org/pdf/1602.01783.pdf) |
-| DDPG|Discrete/Continuous |Pendulum| [Continuous control with deep reinforcement learning. Lillicrap et al. 2016.](https://arxiv.org/pdf/1509.02971.pdf) |
-|TD3|Discrete/Continuous |Pendulum|[Addressing function approximation error in actor-critic methods. Fujimoto et al. 2018.](https://arxiv.org/pdf/1802.09477.pdf)|
-|Soft Actor-Critic (SAC)|Discrete/Continuous |Pendulum|[Soft actor-critic algorithms and applications. Haarnoja et al. 2018.](https://arxiv.org/abs/1812.05905)|
-
-## Examples of RL Algorithms:
-
-* **Q-learning**
-
-  Code: `./tutorial_Qlearning.py`
-
-  Paper: [Technical Note: Q-Learning](http://www.gatsby.ucl.ac.uk/~dayan/papers/cjch.pdf)
-
-  Description:
-
-  ```
-  Q-learning is a non-deep-learning method with TD Learning, Off-Policy, e-Greedy Exploration.
-
-  Central formula:
-  Q(S, A) <- Q(S, A) + alpha * (R + lambda * max_a Q(newS, a) - Q(S, A))
-
-  See David Silver RL Tutorial Lecture 5 - Q-Learning for more details.
-  ```
-
-* **Deep Q-Network (DQN)**
-
-  Code: `./tutorial_DQN.py`
-
-  Paper: [Human-level control through deep reinforcement learning](https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf)
-
-  [Playing Atari with Deep Reinforcement Learning](https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf)
-
-  Description:
-
-  ```
-  Deep Q-Network (DQN) is a method of TD Learning, Off-Policy, e-Greedy Exploration (GLIE).
-
-  Central formula:
-  Q(S, A) <- Q(S, A) + alpha * (R + lambda * max_a Q(newS, a) - Q(S, A)),
-  i.e. the network is trained towards the TD target R + lambda * max_a Q(newS, a).
-
-  See David Silver RL Tutorial Lecture 5 - Q-Learning for more details.
-  ```
-
-* **Double DQN / Dueling DQN / Noisy DQN**
-
-  Code: `./tutorial_DQN_variants.py`
-
-  Paper: [Deep Reinforcement Learning with Double Q-learning](https://arxiv.org/abs/1509.06461)
-
-  Description:
-
-  ```
-  We implement Double DQN, Dueling DQN and Noisy DQN here.
-
-  The max operator in standard DQN uses the same values both to select and to evaluate an action:
-
-  Q(s_t, a_t) = R_{t+1} + gamma * max_a Q_target(s_{t+1}, a).
-
-  Double DQN uses the following evaluation to address the overestimation caused by the max operator:
-
-  Q(s_t, a_t) = R_{t+1} + gamma * Q_target(s_{t+1}, argmax_a Q(s_{t+1}, a)).
-
-  Dueling DQN uses a dueling architecture where the value of the state and the advantage of each action are estimated separately.
-
-  Noisy DQN proposes to explore by adding parameter noise.
-  ```
-
-* **Prioritized Experience Replay**
-
-  Code: `./tutorial_prioritized_replay.py`
-
-  Paper: [Prioritized Experience Replay](https://arxiv.org/abs/1511.05952)
-
-  Description:
-
-  ```
-  Prioritized experience replay is an efficient replay method that replays important transitions more frequently. A segment tree data structure is used to speed up indexing, so sampling proportional to priority and updating a priority both take O(log N).
-  ```
-
-* **Distributional DQN (C51)**
-
-  Code: `./tutorial_C51.py`
-
-  Paper: [A Distributional Perspective on Reinforcement Learning](https://arxiv.org/pdf/1707.06887.pdf)
-
-  Description:
-
-  ```
-  The categorical 51 (C51) algorithm is a distributional variant of DQN, where 51 is the number of atoms. Instead of estimating the actual expected value, it models a value distribution over a series of discrete support points (atoms).
-  ```
-
-* **Actor-Critic (AC)**
-
-  Code: `./tutorial_AC.py`
-
-  Paper: [Actor-Critic Algorithms](https://papers.nips.cc/paper/1786-actor-critic-algorithms.pdf)

-  Description:
-
-  ```
-  The implementation of Advantage Actor-Critic, using the TD error as the advantage.
-  ```
-
-* **Asynchronous Advantage Actor-Critic (A3C)**
-
-  Code: `./tutorial_A3C.py`
-
-  Paper: [Asynchronous Methods for Deep Reinforcement Learning](https://arxiv.org/pdf/1602.01783.pdf)
-
-  Description:
-
-  ```
-  The implementation of Asynchronous Advantage Actor-Critic (A3C), using multi-threading for distributed policy learning on an Actor-Critic structure.
-  ```
-
-* **Soft Actor-Critic (SAC)**
-
-  Code: `./tutorial_SAC.py`
-
-  Paper: [Soft Actor-Critic Algorithms and Applications](https://arxiv.org/pdf/1812.05905.pdf)
-
-  Description:
-
-  ```
-  The actor policy in SAC is stochastic, with off-policy training. The 'soft' in SAC refers to the trade-off between entropy and expected return; the additional entropy term encourages a more explorative policy. This implementation also updates the entropy factor automatically.
-
-  This version of Soft Actor-Critic (SAC) contains 5 networks:
-  2 Q-networks, 2 target Q-networks and 1 policy network.
-  ```
-
-* **Vanilla Policy Gradient (PG or REINFORCE)**
-
-  Code: `./tutorial_PG.py`
-
-  Paper: [Policy Gradient Methods for Reinforcement Learning with Function Approximation](https://papers.nips.cc/paper/1713-policy-gradient-methods-for-reinforcement-learning-with-function-approximation.pdf)
-
-  Description:
-
-  ```
-  The policy gradient algorithm works by updating policy parameters via stochastic gradient ascent on policy performance. It is an on-policy algorithm that can be used for environments with either discrete or continuous action spaces.
-
-  To apply it to a continuous action space, you need to change the last softmax layer and the choose_action function.
-  ```
-
-* **Deep Deterministic Policy Gradient (DDPG)**
-
-  Code: `./tutorial_DDPG.py`
-
-  Paper: [Continuous Control With Deep Reinforcement Learning](https://arxiv.org/pdf/1509.02971.pdf)
-
-  Description:
-
-  ```
-  An algorithm that concurrently learns a Q-function and a policy.
-
-  It uses off-policy data and the Bellman equation to learn the Q-function, and uses the Q-function to learn the policy.
-  ```
-
-* **Twin Delayed DDPG (TD3)**
-
-  Code: `./tutorial_TD3.py`
-
-  Paper: [Addressing Function Approximation Error in Actor-Critic Methods](https://arxiv.org/pdf/1802.09477.pdf)
-
-  Description:
-
-  ```
-  DDPG suffers from problems like overestimation of Q-values and sensitivity to hyper-parameters.
-
-  Twin Delayed DDPG (TD3) is a variant of DDPG with several tricks:
-
-  - Trick One: Clipped Double-Q Learning. TD3 learns two Q-functions instead of one (hence “twin”), and uses the smaller of the two Q-values to form the targets in the Bellman error loss functions.
-  - Trick Two: “Delayed” Policy Updates. TD3 updates the policy (and target networks) less frequently than the Q-function.
-  - Trick Three: Target Policy Smoothing. 
TD3 adds noise to the target action, to make it harder for the policy to exploit Q-function errors by smoothing out Q along changes in action. - - The implementation of TD3 includes 6 networks: - 2 Q-networks, 2 target Q-networks, 1 policy network, 1 target policy network. - - Actor policy in TD3 is deterministic, with Gaussian exploration noise. - ``` - - - -* **Trust Region Policy Optimization (TRPO)** - - Code: `./tutorial_TRPO.py` - - Paper: [Trust Region Policy Optimization](https://arxiv.org/pdf/1502.05477.pdf) - - Description: - - ``` - PG method with a large step can crash the policy performance, even with a small step can lead a large differences in policy. - - TRPO constraints the step in policy space using KL divergence (rather than in parameter space), which can monotonically improve performance and avoid a collapsed update. - ``` - - - -* **Proximal Policy Optimization (PPO)** - - Code: `./tutorial_PPO.py` - - Paper: [Proximal Policy Optimization Algorithms](https://arxiv.org/pdf/1707.06347.pdf) - - Description: - - ``` - A simple version of Proximal Policy Optimization (PPO) using single thread. - - PPO is a family of first-order methods that use a few other tricks to keep new policies close to old. - - PPO methods are significantly simpler to implement, and empirically seem to perform at least as well as TRPO. - - - ``` - - - -* **Distributed Proximal Policy Optimization (DPPO)** - - Code: `./tutorial_DPPO.py` - - Paper: [Emergence of Locomotion Behaviours in Rich Environments](https://arxiv.org/pdf/1707.02286.pdf) - - Description: - - ``` - A distributed version of OpenAI's Proximal Policy Optimization (PPO). - - Distribute the workers to collect data in parallel, then stop worker's roll-out and train PPO on collected data. - ``` - - - -* **More in recent weeks** - -## Environment: - -We typically apply game environments in [Openai Gym](https://gym.openai.com/) for our tutorials. For other environment sources like [DeepMind Control Suite](https://github.com/deepmind/dm_control) and [Marathon-Envs in Unity](https://github.com/Unity-Technologies/marathon-envs), they all have wrappers to convert into format of Gym environments, see [here](https://github.com/martinseilair/dm_control2gym) and [here](https://github.com/Unity-Technologies/marathon-envs/tree/master/gym-unity). - -Our env wrapper: `./tutorial_wrappers.py` - -## Authors -- @zsdonghao Hao Dong: AC, A3C, Q-Learning, DQN, PG -- @quantumiracle Zihan Ding: SAC, TD3. -- @Tokarev-TT-33 Tianyang Yu @initial-h Hongming Zhang : PG, DDPG, PPO, DPPO, TRPO -- @Officium Yanhua Huang: C51, DQN_variants, prioritized_replay, wrappers. 
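-
-## Wrapper Example:
-
-The wrappers follow the standard Gym interface, so they can be stacked onto any Gym environment. Below is a minimal sketch of that interface, using a hypothetical `ClipReward` wrapper (not one of the wrappers shipped in `tutorial_wrappers.py`):
-
-```python
-import gym
-
-
-class ClipReward(gym.RewardWrapper):
-    """Clip every reward to [-bound, bound] before the agent sees it."""
-
-    def __init__(self, env, bound=1.0):
-        super().__init__(env)
-        self.bound = bound
-
-    def reward(self, reward):
-        return max(-self.bound, min(self.bound, reward))
-
-
-env = ClipReward(gym.make('CartPole-v0'), bound=1.0)
-```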
-
-## Recommended Materials
-
-- [RL videos by Hung-yi Lee (李宏毅)](https://www.bilibili.com/video/av58458003?from=search&seid=962941912089186406)
-- [CS885 Spring 2018 - Reinforcement Learning by Pascal Poupart](https://cs.uwaterloo.ca/~ppoupart/teaching/cs885-spring18/schedule.html)
-- [Youtube Video By David Silver, 2015 @ UCL](https://www.youtube.com/playlist?list=PLzuuYNsE1EZAXYR4FJ75jcJseBmo4KQ9-)
-- [Teaching Materials By David Silver @ UCL](http://www0.cs.ucl.ac.uk/staff/D.Silver/web/Teaching.html)
-- [Deep Reinforcement Learning: Fundamentals, Research and Applications by Hao Dong, Zihan Ding, Shanghang Zhang et al.](http://deep-reinforcement-learning-book.github.io/)
diff --git a/examples/reinforcement_learning/tutorial_A3C.py b/examples/reinforcement_learning/tutorial_A3C.py
deleted file mode 100644
index f20530ebf..000000000
--- a/examples/reinforcement_learning/tutorial_A3C.py
+++ /dev/null
@@ -1,323 +0,0 @@
-"""
-Asynchronous Advantage Actor Critic (A3C) with Continuous Action Space.
-
-Actor Critic History
-----------------------
-A3C > DDPG (for continuous action space) > AC
-
-Advantage
-----------
-Trains faster and more stably than AC.
-
-Disadvantage
--------------
-Has bias.
-
-Reference
-----------
-Original Paper: https://arxiv.org/pdf/1602.01783.pdf
-MorvanZhou's tutorial: https://morvanzhou.github.io/tutorials/
-MorvanZhou's code: https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow/blob/master/experiments/Solve_BipedalWalker/A3C.py
-
-Environment
------------
-BipedalWalker-v2 : https://gym.openai.com/envs/BipedalWalker-v2
-
-Reward is given for moving forward, for a total of 300+ points up to the far end.
-If the robot falls, it gets -100. Applying motor torque costs a small amount of
-points; a more optimal agent will get a better score. The state consists of hull
-angle speed, angular velocity, horizontal speed, vertical speed, position of joints
-and joint angular speeds, legs contact with ground, and 10 lidar rangefinder
-measurements. There are no coordinates in the state vector.
- -Prerequisites --------------- -tensorflow 2.0.0a0 -tensorflow-probability 0.6.0 -tensorlayer 2.0.0 -&& -pip install box2d box2d-kengz --user - -To run ------- -python tutorial_A3C.py --train/test - -""" - -import argparse -import multiprocessing -import os -import threading -import time - -import gym -import matplotlib.pyplot as plt -import numpy as np -import tensorflow as tf - -import tensorflow_probability as tfp -import tensorlayer as tl - -tfd = tfp.distributions - -tl.logging.set_verbosity(tl.logging.DEBUG) - -# add arguments in command --train/test -parser = argparse.ArgumentParser(description='Train or test neural net motor controller.') -parser.add_argument('--train', dest='train', action='store_true', default=False) -parser.add_argument('--test', dest='test', action='store_true', default=True) -args = parser.parse_args() - -##################### hyper parameters #################### - -ENV_ID = 'BipedalWalker-v2' # BipedalWalkerHardcore-v2 BipedalWalker-v2 LunarLanderContinuous-v2 -RANDOM_SEED = 2 # random seed, can be either an int number or None -RENDER = False # render while training - -ALG_NAME = 'A3C' -N_WORKERS = multiprocessing.cpu_count() # number of workers according to number of cores in cpu -# N_WORKERS = 2 # manually set number of workers -MAX_GLOBAL_EP = 15000 # number of training episodes -TEST_EPISODES = 10 # number of training episodes -GLOBAL_NET_SCOPE = 'Global_Net' -UPDATE_GLOBAL_ITER = 10 # update global policy after several episodes -GAMMA = 0.99 # reward discount factor -ENTROPY_BETA = 0.005 # factor for entropy boosted exploration -LR_A = 0.00005 # learning rate for actor -LR_C = 0.0001 # learning rate for critic -GLOBAL_RUNNING_R = [] -GLOBAL_EP = 0 # will increase during training, stop training when it >= MAX_GLOBAL_EP - -################### Asynchronous Advantage Actor Critic (A3C) #################################### - - -class ACNet(object): - - def __init__(self, scope): - self.scope = scope - - w_init = tf.keras.initializers.glorot_normal(seed=None) # initializer, glorot=xavier - - def get_actor(input_shape): # policy network - with tf.name_scope(self.scope): - ni = tl.layers.Input(input_shape, name='in') - nn = tl.layers.Dense(n_units=500, act=tf.nn.relu6, W_init=w_init, name='la')(ni) - nn = tl.layers.Dense(n_units=300, act=tf.nn.relu6, W_init=w_init, name='la2')(nn) - mu = tl.layers.Dense(n_units=N_A, act=tf.nn.tanh, W_init=w_init, name='mu')(nn) - sigma = tl.layers.Dense(n_units=N_A, act=tf.nn.softplus, W_init=w_init, name='sigma')(nn) - return tl.models.Model(inputs=ni, outputs=[mu, sigma], name=scope + '/Actor') - - self.actor = get_actor([None, N_S]) - self.actor.train() # train mode for Dropout, BatchNorm - - def get_critic(input_shape): # we use Value-function here, but not Q-function. 
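-            # A3C only needs V(s): the advantage is the TD error
-            # (R + gamma * V(s') - V(s)), so no action-conditioned Q head is required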
- with tf.name_scope(self.scope): - ni = tl.layers.Input(input_shape, name='in') - nn = tl.layers.Dense(n_units=500, act=tf.nn.relu6, W_init=w_init, name='lc')(ni) - nn = tl.layers.Dense(n_units=300, act=tf.nn.relu6, W_init=w_init, name='lc2')(nn) - v = tl.layers.Dense(n_units=1, W_init=w_init, name='v')(nn) - return tl.models.Model(inputs=ni, outputs=v, name=scope + '/Critic') - - self.critic = get_critic([None, N_S]) - self.critic.train() # train mode for Dropout, BatchNorm - - @tf.function # convert numpy functions to tf.Operations in the TFgraph, return tensor - def update_global( - self, buffer_s, buffer_a, buffer_v_target, globalAC - ): # refer to the global Actor-Crtic network for updating it with samples - ''' update the global critic ''' - with tf.GradientTape() as tape: - self.v = self.critic(buffer_s) - self.v_target = buffer_v_target - td = tf.subtract(self.v_target, self.v, name='TD_error') - self.c_loss = tf.reduce_mean(tf.square(td)) - self.c_grads = tape.gradient(self.c_loss, self.critic.trainable_weights) - OPT_C.apply_gradients(zip(self.c_grads, globalAC.critic.trainable_weights)) # local grads applies to global net - # del tape # Drop the reference to the tape - ''' update the global actor ''' - with tf.GradientTape() as tape: - self.mu, self.sigma = self.actor(buffer_s) - self.test = self.sigma[0] - self.mu, self.sigma = self.mu * A_BOUND[1], self.sigma + 1e-5 - - normal_dist = tfd.Normal(self.mu, self.sigma) # no tf.contrib for tf2.0 - self.a_his = buffer_a # float32 - log_prob = normal_dist.log_prob(self.a_his) - exp_v = log_prob * td # td is from the critic part, no gradients for it - entropy = normal_dist.entropy() # encourage exploration - self.exp_v = ENTROPY_BETA * entropy + exp_v - self.a_loss = tf.reduce_mean(-self.exp_v) - self.a_grads = tape.gradient(self.a_loss, self.actor.trainable_weights) - OPT_A.apply_gradients(zip(self.a_grads, globalAC.actor.trainable_weights)) # local grads applies to global net - return self.test # for test purpose - - @tf.function - def pull_global(self, globalAC): # run by a local, pull weights from the global nets - for l_p, g_p in zip(self.actor.trainable_weights, globalAC.actor.trainable_weights): - l_p.assign(g_p) - for l_p, g_p in zip(self.critic.trainable_weights, globalAC.critic.trainable_weights): - l_p.assign(g_p) - - def get_action(self, s, greedy=False): # run by a local - s = s[np.newaxis, :] - self.mu, self.sigma = self.actor(s) - - with tf.name_scope('wrap_a_out'): - self.mu, self.sigma = self.mu * A_BOUND[1], self.sigma + 1e-5 - if greedy: - return self.mu.numpy()[0] - normal_dist = tfd.Normal(self.mu, self.sigma) # for continuous action space - self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), *A_BOUND) - return self.A.numpy()[0] - - def save(self): # save trained weights - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - if not os.path.exists(path): - os.makedirs(path) - tl.files.save_npz(self.actor.trainable_weights, name=os.path.join(path, 'model_actor.npz')) - tl.files.save_npz(self.critic.trainable_weights, name=os.path.join(path, 'model_critic.npz')) - - def load(self): # load trained weights - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - tl.files.load_and_assign_npz(name=os.path.join(path, 'model_actor.npz'), network=self.actor) - tl.files.load_and_assign_npz(name=os.path.join(path, 'model_critic.npz'), network=self.critic) - - -class Worker(object): - - def __init__(self, name): - self.env = gym.make(ENV_ID) - self.name = name - self.AC = ACNet(name) - - # def 
work(self): - def work(self, globalAC): - global GLOBAL_RUNNING_R, GLOBAL_EP - total_step = 1 - buffer_s, buffer_a, buffer_r = [], [], [] - while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP: - s = self.env.reset() - ep_r = 0 - while True: - # visualize Worker_0 during training - if RENDER and self.name == 'Worker_0' and total_step % 30 == 0: - self.env.render() - s = s.astype('float32') # double to float - a = self.AC.get_action(s) - s_, r, done, _info = self.env.step(a) - - s_ = s_.astype('float32') # double to float - # set robot falls reward to -2 instead of -100 - if r == -100: r = -2 - - ep_r += r - buffer_s.append(s) - buffer_a.append(a) - buffer_r.append(r) - - if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net - - if done: - v_s_ = 0 # terminal - else: - v_s_ = self.AC.critic(s_[np.newaxis, :])[0, 0] # reduce dim from 2 to 0 - - buffer_v_target = [] - - for r in buffer_r[::-1]: # reverse buffer r - v_s_ = r + GAMMA * v_s_ - buffer_v_target.append(v_s_) - - buffer_v_target.reverse() - - buffer_s = tf.convert_to_tensor(np.vstack(buffer_s)) - buffer_a = tf.convert_to_tensor(np.vstack(buffer_a)) - buffer_v_target = tf.convert_to_tensor(np.vstack(buffer_v_target).astype('float32')) - - # update gradients on global network - self.AC.update_global(buffer_s, buffer_a, buffer_v_target, globalAC) - buffer_s, buffer_a, buffer_r = [], [], [] - - # update local network from global network - self.AC.pull_global(globalAC) - - s = s_ - total_step += 1 - if done: - if len(GLOBAL_RUNNING_R) == 0: # record running episode reward - GLOBAL_RUNNING_R.append(ep_r) - else: # moving average - GLOBAL_RUNNING_R.append(0.95 * GLOBAL_RUNNING_R[-1] + 0.05 * ep_r) - print('Training | {}, Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}' \ - .format(self.name, GLOBAL_EP, MAX_GLOBAL_EP, ep_r, time.time() - T0)) - GLOBAL_EP += 1 - break - - -if __name__ == "__main__": - - env = gym.make(ENV_ID) - # reproducible - np.random.seed(RANDOM_SEED) - tf.random.set_seed(RANDOM_SEED) - - N_S = env.observation_space.shape[0] - N_A = env.action_space.shape[0] - - A_BOUND = [env.action_space.low, env.action_space.high] - A_BOUND[0] = A_BOUND[0].reshape(1, N_A) - A_BOUND[1] = A_BOUND[1].reshape(1, N_A) - - with tf.device("/cpu:0"): - GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params - - T0 = time.time() - if args.train: - # ============================= TRAINING =============================== - with tf.device("/cpu:0"): - OPT_A = tf.optimizers.RMSprop(LR_A, name='RMSPropA') - OPT_C = tf.optimizers.RMSprop(LR_C, name='RMSPropC') - workers = [] - # Create worker - for i in range(N_WORKERS): - i_name = 'Worker_%i' % i # worker name - workers.append(Worker(i_name)) - - COORD = tf.train.Coordinator() - - # start TF threading - worker_threads = [] - for worker in workers: - job = lambda: worker.work(GLOBAL_AC) - t = threading.Thread(target=job) - t.start() - worker_threads.append(t) - COORD.join(worker_threads) - - GLOBAL_AC.save() - - plt.plot(GLOBAL_RUNNING_R) - if not os.path.exists('image'): - os.makedirs('image') - plt.savefig(os.path.join('image', '_'.join([ALG_NAME, ENV_ID]))) - - if args.test: - # ============================= EVALUATION ============================= - GLOBAL_AC.load() - for episode in range(TEST_EPISODES): - s = env.reset() - episode_reward = 0 - while True: - env.render() - s = s.astype('float32') # double to float - a = GLOBAL_AC.get_action(s, greedy=True) - s, r, d, _ = env.step(a) - episode_reward += r - if d: - break - print( - 
'Testing | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - episode + 1, TEST_EPISODES, episode_reward, - time.time() - T0 - ) - ) diff --git a/examples/reinforcement_learning/tutorial_AC.py b/examples/reinforcement_learning/tutorial_AC.py deleted file mode 100644 index c497e714a..000000000 --- a/examples/reinforcement_learning/tutorial_AC.py +++ /dev/null @@ -1,277 +0,0 @@ -""" -Actor-Critic -------------- -It uses TD-error as the Advantage. - -Actor Critic History ----------------------- -A3C > DDPG > AC - -Advantage ----------- -AC converge faster than Policy Gradient. - -Disadvantage (IMPORTANT) ------------------------- -The Policy is oscillated (difficult to converge), DDPG can solve -this problem using advantage of DQN. - -Reference ----------- -paper: https://papers.nips.cc/paper/1786-actor-critic-algorithms.pdf -View more on MorvanZhou's tutorial page: https://morvanzhou.github.io/tutorials/ - -Environment ------------- -CartPole-v0: https://gym.openai.com/envs/CartPole-v0 - -A pole is attached by an un-actuated joint to a cart, which moves along a -frictionless track. The system is controlled by applying a force of +1 or -1 -to the cart. The pendulum starts upright, and the goal is to prevent it from -falling over. - -A reward of +1 is provided for every timestep that the pole remains upright. -The episode ends when the pole is more than 15 degrees from vertical, or the -cart moves more than 2.4 units from the center. - - -Prerequisites --------------- -tensorflow >=2.0.0a0 -tensorlayer >=2.0.0 - -To run ------- -python tutorial_AC.py --train/test - -""" -import argparse -import time -import matplotlib.pyplot as plt -import os - -import gym -import numpy as np -import tensorflow as tf - -import tensorlayer as tl - -tl.logging.set_verbosity(tl.logging.DEBUG) - -# add arguments in command --train/test -parser = argparse.ArgumentParser(description='Train or test neural net motor controller.') -parser.add_argument('--train', dest='train', action='store_true', default=False) -parser.add_argument('--test', dest='test', action='store_true', default=True) -args = parser.parse_args() - -##################### hyper parameters #################### - -ENV_ID = 'CartPole-v1' # environment id -RANDOM_SEED = 2 # random seed, can be either an int number or None -RENDER = False # render while training - -ALG_NAME = 'AC' -TRAIN_EPISODES = 200 # number of overall episodes for training -TEST_EPISODES = 10 # number of overall episodes for testing -MAX_STEPS = 500 # maximum time step in one episode -LAM = 0.9 # reward discount in TD error -LR_A = 0.001 # learning rate for actor -LR_C = 0.01 # learning rate for critic - - - -############################### Actor-Critic #################################### - - -class Actor(object): - - def __init__(self, state_dim, action_num, lr=0.001): - - input_layer = tl.layers.Input([None, state_dim], name='state') - layer = tl.layers.Dense( - n_units=30, act=tf.nn.relu6, W_init=tf.random_uniform_initializer(0, 0.01), name='hidden' - )(input_layer) - layer = tl.layers.Dense(n_units=action_num, name='actions')(layer) - self.model = tl.models.Model(inputs=input_layer, outputs=layer, name="Actor") - - self.model.train() - self.optimizer = tf.optimizers.Adam(lr) - - def learn(self, state, action, td_error): - with tf.GradientTape() as tape: - _logits = self.model(np.array([state])) - ## cross-entropy loss weighted by td-error (advantage), - # the cross-entropy mearsures the difference of two probability distributions: the predicted logits and 
sampled action distribution, - # then weighted by the td-error: small difference of real and predict actions for large td-error (advantage); and vice versa. - _exp_v = tl.rein.cross_entropy_reward_loss(logits=_logits, actions=[action], rewards=td_error[0]) - grad = tape.gradient(_exp_v, self.model.trainable_weights) - self.optimizer.apply_gradients(zip(grad, self.model.trainable_weights)) - return _exp_v - - def get_action(self, state, greedy=False): - _logits = self.model(np.array([state])) - _probs = tf.nn.softmax(_logits).numpy() - if greedy: - return np.argmax(_probs.ravel()) - return tl.rein.choice_action_by_probs(_probs.ravel()) # sample according to probability distribution - - def save(self): # save trained weights - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - if not os.path.exists(path): - os.makedirs(path) - tl.files.save_npz(self.model.trainable_weights, name=os.path.join(path, 'model_actor.npz')) - - def load(self): # load trained weights - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - tl.files.load_and_assign_npz(name=os.path.join(path, 'model_actor.npz'), network=self.model) - - -class Critic(object): - - def __init__(self, state_dim, lr=0.01): - input_layer = tl.layers.Input([1, state_dim], name='state') - layer = tl.layers.Dense( - n_units=30, act=tf.nn.relu6, W_init=tf.random_uniform_initializer(0, 0.01), name='hidden' - )(input_layer) - layer = tl.layers.Dense(n_units=1, act=None, name='value')(layer) - self.model = tl.models.Model(inputs=input_layer, outputs=layer, name="Critic") - self.model.train() - - self.optimizer = tf.optimizers.Adam(lr) - - def learn(self, state, reward, state_, done): - d = 0 if done else 1 - v_ = self.model(np.array([state_])) - with tf.GradientTape() as tape: - v = self.model(np.array([state])) - ## TD_error = r + d * lambda * V(newS) - V(S) - td_error = reward + d * LAM * v_ - v - loss = tf.square(td_error) - grad = tape.gradient(loss, self.model.trainable_weights) - self.optimizer.apply_gradients(zip(grad, self.model.trainable_weights)) - return td_error - - def save(self): # save trained weights - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - if not os.path.exists(path): - os.makedirs(path) - tl.files.save_npz(self.model.trainable_weights, name=os.path.join(path, 'model_critic.npz')) - - def load(self): # load trained weights - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - tl.files.load_and_assign_npz(name=os.path.join(path, 'model_critic.npz'), network=self.model) - - -if __name__ == '__main__': - ''' - choose environment - 1. Openai gym: - env = gym.make() - 2. 
DeepMind Control Suite: - env = dm_control2gym.make() - ''' - env = gym.make(ENV_ID).unwrapped - # dm_control2gym.create_render_mode('example mode', show=True, return_pixel=False, height=240, width=320, camera_id=-1, overlays=(), - # depth=False, scene_option=None) - # env = dm_control2gym.make(domain_name="cartpole", task_name="balance") - - env.seed(RANDOM_SEED) # reproducible - np.random.seed(RANDOM_SEED) - tf.random.set_seed(RANDOM_SEED) # reproducible - - N_F = env.observation_space.shape[0] - N_A = env.action_space.n - - print("observation dimension: %d" % N_F) # 4 - print("observation high: %s" % env.observation_space.high) # [ 2.4 , inf , 0.41887902 , inf] - print("observation low : %s" % env.observation_space.low) # [-2.4 , -inf , -0.41887902 , -inf] - print("num of actions: %d" % N_A) # 2 : left or right - - actor = Actor(state_dim=N_F, action_num=N_A, lr=LR_A) - # we need a good teacher, so the teacher should learn faster than the actor - critic = Critic(state_dim=N_F, lr=LR_C) - - t0 = time.time() - if args.train: - all_episode_reward = [] - for episode in range(TRAIN_EPISODES): - state = env.reset().astype(np.float32) - step = 0 # number of step in this episode - episode_reward = 0 # rewards of all steps - while True: - if RENDER: env.render() - - action = actor.get_action(state) - - state_new, reward, done, info = env.step(action) - state_new = state_new.astype(np.float32) - - if done: reward = -20 # reward shaping trick - # these may helpful in some tasks - # if abs(s_new[0]) >= env.observation_space.high[0]: - # # cart moves more than 2.4 units from the center - # r = -20 - # reward for the distance between cart to the center - # r -= abs(s_new[0]) * .1 - - episode_reward += reward - - try: - td_error = critic.learn( - state, reward, state_new, done - ) # learn Value-function : gradient = grad[r + lambda * V(s_new) - V(s)] - actor.learn(state, action, td_error) # learn Policy : true_gradient = grad[logPi(s, a) * td_error] - except KeyboardInterrupt: # if Ctrl+C at running actor.learn(), then save model, or exit if not at actor.learn() - actor.save() - critic.save() - - state = state_new - step += 1 - - if done or step >= MAX_STEPS: - break - - if episode == 0: - all_episode_reward.append(episode_reward) - else: - all_episode_reward.append(all_episode_reward[-1] * 0.9 + episode_reward * 0.1) - - print('Training | Episode: {}/{} | Episode Reward: {:.0f} | Running Time: {:.4f}' \ - .format(episode + 1, TRAIN_EPISODES, episode_reward, time.time() - t0)) - - # Early Stopping for quick check - if step >= MAX_STEPS: - print("Early Stopping") # Hao Dong: it is important for this task - break - actor.save() - critic.save() - - plt.plot(all_episode_reward) - if not os.path.exists('image'): - os.makedirs('image') - plt.savefig(os.path.join('image', '_'.join([ALG_NAME, ENV_ID]))) - - if args.test: - actor.load() - critic.load() - - for episode in range(TEST_EPISODES): - episode_time = time.time() - state = env.reset().astype(np.float32) - t = 0 # number of step in this episode - episode_reward = 0 - while True: - env.render() - action = actor.get_action(state, greedy=True) - state_new, reward, done, info = env.step(action) - state_new = state_new.astype(np.float32) - if done: reward = -20 - - episode_reward += reward - state = state_new - t += 1 - - if done or t >= MAX_STEPS: - print('Testing | Episode: {}/{} | Episode Reward: {:.0f} | Running Time: {:.4f}' \ - .format(episode + 1, TEST_EPISODES, episode_reward, time.time() - t0)) - break diff --git 
a/examples/reinforcement_learning/tutorial_C51.py b/examples/reinforcement_learning/tutorial_C51.py deleted file mode 100644 index 50b82d66e..000000000 --- a/examples/reinforcement_learning/tutorial_C51.py +++ /dev/null @@ -1,343 +0,0 @@ -""" -C51 Algorithm ------------------------- -Categorical 51 distributional RL algorithm, 51 means the number of atoms. In -this algorithm, instead of estimating actual expected value, value distribution -over a series of continuous sub-intervals (atoms) is considered. -Reference: ------------------------- -Bellemare M G, Dabney W, Munos R. A distributional perspective on reinforcement -learning[C]//Proceedings of the 34th International Conference on Machine -Learning-Volume 70. JMLR. org, 2017: 449-458. -Environment: ------------------------- -Cartpole and Pong in OpenAI Gym -Requirements: ------------------------- -tensorflow>=2.0.0a0 -tensorlayer>=2.0.0 -To run: ------------------------- -python tutorial_C51.py --mode=train -python tutorial_C51.py --mode=test --save_path=c51/8000.npz -""" -import argparse -import os -import random -import time - -import gym -import matplotlib.pyplot as plt -import numpy as np -import tensorflow as tf - -import tensorlayer as tl - -parser = argparse.ArgumentParser() -parser.add_argument('--train', dest='train', action='store_true', default=True) -parser.add_argument('--test', dest='test', action='store_true', default=True) -parser.add_argument( - '--save_path', default=None, help='folder to save if mode == train else model path,' - 'qnet will be saved once target net update' -) -parser.add_argument('--seed', help='random seed', type=int, default=0) -parser.add_argument('--env_id', default='CartPole-v0', help='CartPole-v0 or PongNoFrameskip-v4') -args = parser.parse_args() - -random.seed(args.seed) -np.random.seed(args.seed) -tf.random.set_seed(args.seed) # reproducible -env_id = args.env_id -env = gym.make(env_id) -env.seed(args.seed) -alg_name = 'C51' - -# #################### hyper parameters #################### -if env_id == 'CartPole-v0': - qnet_type = 'MLP' - number_timesteps = 10000 # total number of time steps to train on - explore_timesteps = 100 - # epsilon-greedy schedule, final exploit prob is 0.99 - epsilon = lambda i_iter: 1 - 0.99 * min(1, i_iter / explore_timesteps) - lr = 5e-3 # learning rate - buffer_size = 1000 # replay buffer size - target_q_update_freq = 50 # how frequency target q net update - ob_scale = 1.0 # scale observations - clipnorm = None -else: - # reward will increase obviously after 1e5 time steps - qnet_type = 'CNN' - number_timesteps = int(1e6) # total number of time steps to train on - explore_timesteps = 1e5 - # epsilon-greedy schedule, final exploit prob is 0.99 - epsilon = lambda i_iter: 1 - 0.99 * min(1, i_iter / explore_timesteps) - lr = 1e-4 # learning rate - buffer_size = 10000 # replay buffer size - target_q_update_freq = 200 # how frequency target q net update - ob_scale = 1.0 / 255 # scale observations - clipnorm = 10 - -in_dim = env.observation_space.shape -out_dim = env.action_space.n -reward_gamma = 0.99 # reward discount -batch_size = 32 # batch size for sampling from replay buffer -warm_start = buffer_size / 10 # sample times befor learning -atom_num = 51 -min_value = -10 -max_value = 10 -vrange = np.linspace(min_value, max_value, atom_num) -deltaz = float(max_value - min_value) / (atom_num - 1) - - -# ############################## Network #################################### -class MLP(tl.models.Model): - - def __init__(self, name): - super(MLP, 
self).__init__(name=name) - self.h1 = tl.layers.Dense(64, tf.nn.tanh, in_channels=in_dim[0], W_init=tf.initializers.GlorotUniform()) - self.qvalue = tl.layers.Dense( - out_dim * atom_num, in_channels=64, name='q', W_init=tf.initializers.GlorotUniform() - ) - self.reshape = tl.layers.Reshape((-1, out_dim, atom_num)) - - def forward(self, ni): - qvalues = self.qvalue(self.h1(ni)) - return tf.nn.log_softmax(self.reshape(qvalues), 2) - - -class CNN(tl.models.Model): - - def __init__(self, name): - super(CNN, self).__init__(name=name) - h, w, in_channels = in_dim - dense_in_channels = 64 * ((h - 28) // 8) * ((w - 28) // 8) - self.conv1 = tl.layers.Conv2d( - 32, (8, 8), (4, 4), tf.nn.relu, 'VALID', in_channels=in_channels, name='conv2d_1', - W_init=tf.initializers.GlorotUniform() - ) - self.conv2 = tl.layers.Conv2d( - 64, (4, 4), (2, 2), tf.nn.relu, 'VALID', in_channels=32, name='conv2d_2', - W_init=tf.initializers.GlorotUniform() - ) - self.conv3 = tl.layers.Conv2d( - 64, (3, 3), (1, 1), tf.nn.relu, 'VALID', in_channels=64, name='conv2d_3', - W_init=tf.initializers.GlorotUniform() - ) - self.flatten = tl.layers.Flatten(name='flatten') - self.preq = tl.layers.Dense( - 256, tf.nn.relu, in_channels=dense_in_channels, name='pre_q', W_init=tf.initializers.GlorotUniform() - ) - self.qvalue = tl.layers.Dense( - out_dim * atom_num, in_channels=256, name='q', W_init=tf.initializers.GlorotUniform() - ) - self.reshape = tl.layers.Reshape((-1, out_dim, atom_num)) - - def forward(self, ni): - feature = self.flatten(self.conv3(self.conv2(self.conv1(ni)))) - qvalues = self.qvalue(self.preq(feature)) - return tf.nn.log_softmax(self.reshape(qvalues), 2) - - -# ############################## Replay #################################### -class ReplayBuffer(object): - - def __init__(self, size): - self._storage = [] - self._maxsize = size - self._next_idx = 0 - - def __len__(self): - return len(self._storage) - - def add(self, *args): - if self._next_idx >= len(self._storage): - self._storage.append(args) - else: - self._storage[self._next_idx] = args - self._next_idx = (self._next_idx + 1) % self._maxsize - - def _encode_sample(self, idxes): - b_o, b_a, b_r, b_o_, b_d = [], [], [], [], [] - for i in idxes: - o, a, r, o_, d = self._storage[i] - b_o.append(o) - b_a.append(a) - b_r.append(r) - b_o_.append(o_) - b_d.append(d) - return ( - np.stack(b_o).astype('float32') * ob_scale, - np.stack(b_a).astype('int32'), - np.stack(b_r).astype('float32'), - np.stack(b_o_).astype('float32') * ob_scale, - np.stack(b_d).astype('float32'), - ) - - def sample(self, batch_size): - indexes = range(len(self._storage)) - idxes = [random.choice(indexes) for _ in range(batch_size)] - return self._encode_sample(idxes) - - -# ############################# Functions ################################### -def huber_loss(x): - """Loss function for value""" - return tf.where(tf.abs(x) < 1, tf.square(x) * 0.5, tf.abs(x) - 0.5) - - -def sync(net, net_tar): - """Copy q network to target q network""" - for var, var_tar in zip(net.trainable_weights, net_tar.trainable_weights): - var_tar.assign(var) - - -# ############################### DQN ##################################### -class DQN(object): - - def __init__(self): - model = MLP if qnet_type == 'MLP' else CNN - self.qnet = model('q') - if args.train: - self.qnet.train() - self.targetqnet = model('targetq') - self.targetqnet.infer() - sync(self.qnet, self.targetqnet) - else: - self.qnet.infer() - self.load(args.save_path) - self.niter = 0 - if clipnorm is not None: - self.optimizer = 
tf.optimizers.Adam(learning_rate=lr, clipnorm=clipnorm) - else: - self.optimizer = tf.optimizers.Adam(learning_rate=lr) - - def get_action(self, obv): - eps = epsilon(self.niter) - if args.train and random.random() < eps: - return int(random.random() * out_dim) - else: - obv = np.expand_dims(obv, 0).astype('float32') * ob_scale - qdist = np.exp(self._qvalues_func(obv).numpy()) - qvalues = (qdist * vrange).sum(-1) - return qvalues.argmax(1)[0] - - @tf.function - def _qvalues_func(self, obv): - return self.qnet(obv) - - def train(self, b_o, b_a, b_r, b_o_, b_d): - # TODO: move q_estimation in tf.function - b_dist_ = np.exp(self.targetqnet(b_o_).numpy()) - b_a_ = (b_dist_ * vrange).sum(-1).argmax(1) - b_tzj = np.clip(reward_gamma * (1 - b_d[:, None]) * vrange[None, :] + b_r[:, None], min_value, max_value) - b_i = (b_tzj - min_value) / deltaz - b_l = np.floor(b_i).astype('int64') - b_u = np.ceil(b_i).astype('int64') - templ = b_dist_[range(batch_size), b_a_, :] * (b_u - b_i) - tempu = b_dist_[range(batch_size), b_a_, :] * (b_i - b_l) - b_m = np.zeros((batch_size, atom_num)) - # TODO: aggregate value by index and batch update (scatter_add) - for j in range(batch_size): - for k in range(atom_num): - b_m[j][b_l[j][k]] += templ[j][k] - b_m[j][b_u[j][k]] += tempu[j][k] - b_m = tf.convert_to_tensor(b_m, dtype='float32') - b_index = np.stack([range(batch_size), b_a], 1) - b_index = tf.convert_to_tensor(b_index, 'int64') - - self._train_func(b_o, b_index, b_m) - - self.niter += 1 - if self.niter % target_q_update_freq == 0: - sync(self.qnet, self.targetqnet) - self.save(args.save_path) - - def save(self, path): - if path is None: - path = os.path.join('model', '_'.join([alg_name, env_id])) - if not os.path.exists(path): - os.makedirs(path) - tl.files.save_weights_to_hdf5(os.path.join(path, 'q_net.hdf5'), self.qnet) - - def load(self, path): - if path is None: - path = os.path.join('model', '_'.join([alg_name, env_id])) - tl.files.load_hdf5_to_weights_in_order(os.path.join(path, 'q_net.hdf5'), self.qnet) - - @tf.function - def _train_func(self, b_o, b_index, b_m): - with tf.GradientTape() as tape: - b_dist_a = tf.gather_nd(self.qnet(b_o), b_index) - loss = tf.reduce_mean(tf.negative(tf.reduce_sum(b_dist_a * b_m, 1))) - - grad = tape.gradient(loss, self.qnet.trainable_weights) - self.optimizer.apply_gradients(zip(grad, self.qnet.trainable_weights)) - - -# ############################# Trainer ################################### -if __name__ == '__main__': - dqn = DQN() - t0 = time.time() - if args.train: - buffer = ReplayBuffer(buffer_size) - nepisode = 0 - all_episode_reward = [] - for i in range(1, number_timesteps + 1): - o = env.reset() - episode_reward = 0 - while True: - a = dqn.get_action(o) - # execute action and feed to replay buffer - # note that `_` tail in var name means next - o_, r, done, info = env.step(a) - buffer.add(o, a, r, o_, done) - episode_reward += r - - if i >= warm_start: - transitions = buffer.sample(batch_size) - dqn.train(*transitions) - - if done: - break - else: - o = o_ - - if nepisode == 0: - all_episode_reward.append(episode_reward) - else: - all_episode_reward.append(all_episode_reward[-1] * 0.9 + episode_reward * 0.1) - nepisode += 1 - print( - 'Training | Episode: {} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - nepisode, episode_reward, - time.time() - t0 - ) - ) # episode num starts from 1 in print - - dqn.save(args.save_path) - plt.plot(all_episode_reward) - if not os.path.exists('image'): - os.makedirs('image') - plt.savefig(os.path.join('image', 
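# The nested projection loop in train() above is exactly the scatter-add that
# its TODO asks for; an editorial sketch of a vectorized form with np.add.at,
# reusing batch_size, atom_num, b_l, b_u, templ and tempu as computed there
# (a suggestion, not part of the original file):
rows = np.repeat(np.arange(batch_size), atom_num)   # row index for every (sample, atom) pair
b_m = np.zeros((batch_size, atom_num))
np.add.at(b_m, (rows, b_l.ravel()), templ.ravel())  # accumulate mass on the lower atoms
np.add.at(b_m, (rows, b_u.ravel()), tempu.ravel())  # accumulate mass on the upper atoms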
'_'.join([alg_name, env_id])))
-
-    if args.test:
-        nepisode = 0
-        for i in range(1, number_timesteps + 1):
-            o = env.reset()
-            episode_reward = 0
-            while True:
-                env.render()
-                a = dqn.get_action(o)
-                o_, r, done, info = env.step(a)
-                episode_reward += r
-                if done:
-                    break
-                else:
-                    o = o_
-            nepisode += 1
-            print(
-                'Testing | Episode: {} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
-                    nepisode, episode_reward,
-                    time.time() - t0
-                )
-            )
diff --git a/examples/reinforcement_learning/tutorial_DDPG.py b/examples/reinforcement_learning/tutorial_DDPG.py
deleted file mode 100644
index c006a7bf4..000000000
--- a/examples/reinforcement_learning/tutorial_DDPG.py
+++ /dev/null
@@ -1,305 +0,0 @@
-"""
-Deep Deterministic Policy Gradient (DDPG)
------------------------------------------
-An algorithm that concurrently learns a Q-function and a policy.
-It uses off-policy data and the Bellman equation to learn the Q-function,
-and uses the Q-function to learn the policy.
-
-Reference
----------
-Deterministic Policy Gradient Algorithms, Silver et al. 2014
-Continuous Control With Deep Reinforcement Learning, Lillicrap et al. 2016
-MorvanZhou's tutorial page: https://morvanzhou.github.io/tutorials/
-
-Environment
------------
-Openai Gym Pendulum-v0, continuous action space
-
-Prerequisites
--------------
-tensorflow >=2.0.0a0
-tensorflow-probability 0.6.0
-tensorlayer >=2.0.0
-
-To run
-------
-python tutorial_DDPG.py --train/test
-
-"""
-
-import argparse
-import os
-import time
-
-import gym
-import matplotlib.pyplot as plt
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-
-# add arguments in command --train/test
-parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
-parser.add_argument('--train', dest='train', action='store_true', default=False)
-parser.add_argument('--test', dest='test', action='store_true', default=True)
-args = parser.parse_args()
-
-#####################  hyper parameters  ####################
-
-ENV_ID = 'Pendulum-v0'  # environment id
-RANDOM_SEED = 2  # random seed, can be either an int number or None
-RENDER = False  # render while training
-
-ALG_NAME = 'DDPG'
-TRAIN_EPISODES = 100  # total number of episodes for training
-TEST_EPISODES = 10  # total number of episodes for testing
-MAX_STEPS = 200  # maximum number of steps for each episode
-
-LR_A = 0.001  # learning rate for actor
-LR_C = 0.002  # learning rate for critic
-GAMMA = 0.9  # reward discount
-TAU = 0.01  # soft replacement
-MEMORY_CAPACITY = 10000  # size of replay buffer
-BATCH_SIZE = 32  # update batch size
-VAR = 2  # control exploration
-
-###############################  DDPG  ####################################
-
-
-class DDPG(object):
-    """
-    DDPG class
-    """
-
-    def __init__(self, action_dim, state_dim, action_range):
-        self.memory = np.zeros((MEMORY_CAPACITY, state_dim * 2 + action_dim + 1), dtype=np.float32)
-        self.pointer = 0
-        self.action_dim, self.state_dim, self.action_range = action_dim, state_dim, action_range
-        self.var = VAR
-
-        W_init = tf.random_normal_initializer(mean=0, stddev=0.3)
-        b_init = tf.constant_initializer(0.1)
-
-        def get_actor(input_state_shape, name=''):
-            """
-            Build actor network
-            :param input_state_shape: state
-            :param name: name
-            :return: act
-            """
-            input_layer = tl.layers.Input(input_state_shape, name='A_input')
-            layer = tl.layers.Dense(n_units=64, act=tf.nn.relu, W_init=W_init, b_init=b_init, name='A_l1')(input_layer)
-            layer = tl.layers.Dense(n_units=64, act=tf.nn.relu, W_init=W_init, b_init=b_init,
name='A_l2')(layer) - layer = tl.layers.Dense(n_units=action_dim, act=tf.nn.tanh, W_init=W_init, b_init=b_init, name='A_a')(layer) - layer = tl.layers.Lambda(lambda x: action_range * x)(layer) - return tl.models.Model(inputs=input_layer, outputs=layer, name='Actor' + name) - - def get_critic(input_state_shape, input_action_shape, name=''): - """ - Build critic network - :param input_state_shape: state - :param input_action_shape: act - :param name: name - :return: Q value Q(s,a) - """ - state_input = tl.layers.Input(input_state_shape, name='C_s_input') - action_input = tl.layers.Input(input_action_shape, name='C_a_input') - layer = tl.layers.Concat(1)([state_input, action_input]) - layer = tl.layers.Dense(n_units=64, act=tf.nn.relu, W_init=W_init, b_init=b_init, name='C_l1')(layer) - layer = tl.layers.Dense(n_units=64, act=tf.nn.relu, W_init=W_init, b_init=b_init, name='C_l2')(layer) - layer = tl.layers.Dense(n_units=1, W_init=W_init, b_init=b_init, name='C_out')(layer) - return tl.models.Model(inputs=[state_input, action_input], outputs=layer, name='Critic' + name) - - self.actor = get_actor([None, state_dim]) - self.critic = get_critic([None, state_dim], [None, action_dim]) - self.actor.train() - self.critic.train() - - def copy_para(from_model, to_model): - """ - Copy parameters for soft updating - :param from_model: latest model - :param to_model: target model - :return: None - """ - for i, j in zip(from_model.trainable_weights, to_model.trainable_weights): - j.assign(i) - - self.actor_target = get_actor([None, state_dim], name='_target') - copy_para(self.actor, self.actor_target) - self.actor_target.eval() - - self.critic_target = get_critic([None, state_dim], [None, action_dim], name='_target') - copy_para(self.critic, self.critic_target) - self.critic_target.eval() - - self.ema = tf.train.ExponentialMovingAverage(decay=1 - TAU) # soft replacement - - self.actor_opt = tf.optimizers.Adam(LR_A) - self.critic_opt = tf.optimizers.Adam(LR_C) - - def ema_update(self): - """ - Soft updating by exponential smoothing - :return: None - """ - paras = self.actor.trainable_weights + self.critic.trainable_weights - self.ema.apply(paras) - for i, j in zip(self.actor_target.trainable_weights + self.critic_target.trainable_weights, paras): - i.assign(self.ema.average(j)) - - def get_action(self, s, greedy=False): - """ - Choose action - :param s: state - :param greedy: get action greedy or not - :return: act - """ - a = self.actor(np.array([s], dtype=np.float32))[0] - if greedy: - return a - return np.clip( - np.random.normal(a, self.var), -self.action_range, self.action_range - ) # add randomness to action selection for exploration - - def learn(self): - """ - Update parameters - :return: None - """ - self.var *= .9995 - indices = np.random.choice(MEMORY_CAPACITY, size=BATCH_SIZE) - datas = self.memory[indices, :] - states = datas[:, :self.state_dim] - actions = datas[:, self.state_dim:self.state_dim + self.action_dim] - rewards = datas[:, -self.state_dim - 1:-self.state_dim] - states_ = datas[:, -self.state_dim:] - - with tf.GradientTape() as tape: - actions_ = self.actor_target(states_) - q_ = self.critic_target([states_, actions_]) - y = rewards + GAMMA * q_ - q = self.critic([states, actions]) - td_error = tf.losses.mean_squared_error(y, q) - critic_grads = tape.gradient(td_error, self.critic.trainable_weights) - self.critic_opt.apply_gradients(zip(critic_grads, self.critic.trainable_weights)) - - with tf.GradientTape() as tape: - a = self.actor(states) - q = self.critic([states, a]) - 
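# As an aside, the ExponentialMovingAverage in ema_update() above realizes the
# standard polyak soft update; an equivalent direct form as an editorial sketch
# (TAU as defined in the hyper parameters above):
def soft_update(online_weights, target_weights, tau=TAU):
    # target <- (1 - tau) * target + tau * online, applied weight by weight
    for w, w_tar in zip(online_weights, target_weights):
        w_tar.assign((1.0 - tau) * w_tar + tau * w)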
actor_loss = -tf.reduce_mean(q) # maximize the q - actor_grads = tape.gradient(actor_loss, self.actor.trainable_weights) - self.actor_opt.apply_gradients(zip(actor_grads, self.actor.trainable_weights)) - self.ema_update() - - def store_transition(self, s, a, r, s_): - """ - Store data in data buffer - :param s: state - :param a: act - :param r: reward - :param s_: next state - :return: None - """ - s = s.astype(np.float32) - s_ = s_.astype(np.float32) - transition = np.hstack((s, a, [r], s_)) - index = self.pointer % MEMORY_CAPACITY # replace the old memory with new memory - self.memory[index, :] = transition - self.pointer += 1 - - def save(self): - """ - save trained weights - :return: None - """ - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - if not os.path.exists(path): - os.makedirs(path) - tl.files.save_weights_to_hdf5(os.path.join(path, 'actor.hdf5'), self.actor) - tl.files.save_weights_to_hdf5(os.path.join(path, 'actor_target.hdf5'), self.actor_target) - tl.files.save_weights_to_hdf5(os.path.join(path, 'critic.hdf5'), self.critic) - tl.files.save_weights_to_hdf5(os.path.join(path, 'critic_target.hdf5'), self.critic_target) - - def load(self): - """ - load trained weights - :return: None - """ - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - tl.files.load_hdf5_to_weights_in_order(os.path.join(path, 'actor.hdf5'), self.actor) - tl.files.load_hdf5_to_weights_in_order(os.path.join(path, 'actor_target.hdf5'), self.actor_target) - tl.files.load_hdf5_to_weights_in_order(os.path.join(path, 'critic.hdf5'), self.critic) - tl.files.load_hdf5_to_weights_in_order(os.path.join(path, 'critic_target.hdf5'), self.critic_target) - - -if __name__ == '__main__': - env = gym.make(ENV_ID).unwrapped - - # reproducible - env.seed(RANDOM_SEED) - np.random.seed(RANDOM_SEED) - tf.random.set_seed(RANDOM_SEED) - - state_dim = env.observation_space.shape[0] - action_dim = env.action_space.shape[0] - action_range = env.action_space.high # scale action, [-action_range, action_range] - - agent = DDPG(action_dim, state_dim, action_range) - - t0 = time.time() - if args.train: # train - all_episode_reward = [] - for episode in range(TRAIN_EPISODES): - state = env.reset() - episode_reward = 0 - for step in range(MAX_STEPS): - if RENDER: - env.render() - # Add exploration noise - action = agent.get_action(state) - state_, reward, done, info = env.step(action) - agent.store_transition(state, action, reward, state_) - - if agent.pointer > MEMORY_CAPACITY: - agent.learn() - - state = state_ - episode_reward += reward - if done: - break - - if episode == 0: - all_episode_reward.append(episode_reward) - else: - all_episode_reward.append(all_episode_reward[-1] * 0.9 + episode_reward * 0.1) - print( - 'Training | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - episode + 1, TRAIN_EPISODES, episode_reward, - time.time() - t0 - ) - ) - agent.save() - plt.plot(all_episode_reward) - if not os.path.exists('image'): - os.makedirs('image') - plt.savefig(os.path.join('image', '_'.join([ALG_NAME, ENV_ID]))) - - if args.test: - # test - agent.load() - for episode in range(TEST_EPISODES): - state = env.reset() - episode_reward = 0 - for step in range(MAX_STEPS): - env.render() - state, reward, done, info = env.step(agent.get_action(state, greedy=True)) - episode_reward += reward - if done: - break - print( - 'Testing | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - episode + 1, TEST_EPISODES, episode_reward, - time.time() - t0 - ) - ) diff --git 
a/examples/reinforcement_learning/tutorial_DPPO.py b/examples/reinforcement_learning/tutorial_DPPO.py deleted file mode 100644 index dbfd78db5..000000000 --- a/examples/reinforcement_learning/tutorial_DPPO.py +++ /dev/null @@ -1,378 +0,0 @@ -""" -Distributed Proximal Policy Optimization (DPPO) ----------------------------- -A distributed version of OpenAI's Proximal Policy Optimization (PPO). -Workers in parallel to collect data, then stop worker's roll-out and train PPO on collected data. -Restart workers once PPO is updated. - -Reference ---------- -Emergence of Locomotion Behaviours in Rich Environments, Heess et al. 2017 -Proximal Policy Optimization Algorithms, Schulman et al. 2017 -High Dimensional Continuous Control Using Generalized Advantage Estimation, Schulman et al. 2016 -MorvanZhou's tutorial page: https://morvanzhou.github.io/tutorials - -Environment ------------ -Openai Gym Pendulum-v0, continual action space - -Prerequisites --------------- -tensorflow >=2.0.0a0 -tensorflow-probability 0.6.0 -tensorlayer >=2.0.0 - -To run ------- -python tutorial_DPPO.py --train/test -""" - -import argparse -import os -import queue -import threading -import time - -import gym -import matplotlib.pyplot as plt -import numpy as np -import tensorflow as tf -import tensorflow_probability as tfp - -import tensorlayer as tl - -parser = argparse.ArgumentParser(description='Train or test neural net motor controller.') -parser.add_argument('--train', dest='train', action='store_true', default=False) -parser.add_argument('--test', dest='test', action='store_true', default=True) -args = parser.parse_args() - -##################### hyper parameters #################### - -ENV_ID = 'Pendulum-v0' # environment name -RANDOMSEED = 2 # random seed -RENDER = False # render while training - -ALG_NAME = 'DPPO' -TRAIN_EPISODES = 1000 # total number of episodes for training -TEST_EPISODES = 10 # number of overall episodes for testing -MAX_STEPS = 200 # total number of steps for each episode -GAMMA = 0.9 # reward discount -LR_A = 0.0001 # learning rate for actor -LR_C = 0.0002 # learning rate for critic -ACTOR_UPDATE_STEPS = 10 # actor update steps -CRITIC_UPDATE_STEPS = 10 # critic update steps -MIN_BATCH_SIZE = 64 # minimum batch size for updating PPO - -N_WORKER = 4 # parallel workers -UPDATE_STEP = 10 # loop update operation n-steps - -# ppo-penalty parameters -KL_TARGET = 0.01 -LAM = 0.5 - -# ppo-clip parameters -EPSILON = 0.2 - - -############################### DPPO #################################### - - -class PPO(object): - """ - PPO class - """ - - def __init__(self, state_dim, action_dim, action_bound, method='clip'): - - # critic - with tf.name_scope('critic'): - inputs = tl.layers.Input([None, state_dim], tf.float32, 'state') - layer = tl.layers.Dense(64, tf.nn.relu)(inputs) - layer = tl.layers.Dense(64, tf.nn.relu)(layer) - v = tl.layers.Dense(1)(layer) - self.critic = tl.models.Model(inputs, v) - self.critic.train() - self.method = method - - # actor - with tf.name_scope('actor'): - inputs = tl.layers.Input([None, state_dim], tf.float32, 'state') - layer = tl.layers.Dense(64, tf.nn.relu)(inputs) - layer = tl.layers.Dense(64, tf.nn.relu)(layer) - a = tl.layers.Dense(action_dim, tf.nn.tanh)(layer) - mean = tl.layers.Lambda(lambda x: x * action_bound, name='lambda')(a) - logstd = tf.Variable(np.zeros(action_dim, dtype=np.float32)) - self.actor = tl.models.Model(inputs, mean) - self.actor.trainable_weights.append(logstd) - self.actor.logstd = logstd - self.actor.train() - - self.actor_opt = 
tf.optimizers.Adam(LR_A)
-        self.critic_opt = tf.optimizers.Adam(LR_C)
-
-        self.method = method
-        if method == 'penalty':
-            self.kl_target = KL_TARGET
-            self.lam = LAM
-        elif method == 'clip':
-            self.epsilon = EPSILON
-
-        self.state_buffer, self.action_buffer = [], []
-        self.reward_buffer, self.cumulative_reward_buffer = [], []
-        self.action_bound = action_bound
-
-    def train_actor(self, state, action, adv, old_pi):
-        """
-        Update policy network
-        :param state: state batch
-        :param action: action batch
-        :param adv: advantage batch
-        :param old_pi: old pi distribution
-        :return: kl_mean or None
-        """
-        with tf.GradientTape() as tape:
-            mean, std = self.actor(state), tf.exp(self.actor.logstd)
-            pi = tfp.distributions.Normal(mean, std)
-
-            ratio = tf.exp(pi.log_prob(action) - old_pi.log_prob(action))
-            surr = ratio * adv
-            if self.method == 'penalty':  # ppo penalty
-                kl = tfp.distributions.kl_divergence(old_pi, pi)
-                kl_mean = tf.reduce_mean(kl)
-                loss = -(tf.reduce_mean(surr - self.lam * kl))
-            else:  # ppo clip
-                loss = -tf.reduce_mean(
-                    tf.minimum(surr,
-                               tf.clip_by_value(ratio, 1. - self.epsilon, 1. + self.epsilon) * adv)
-                )
-        a_grad = tape.gradient(loss, self.actor.trainable_weights)
-        self.actor_opt.apply_gradients(zip(a_grad, self.actor.trainable_weights))
-
-        if self.method == 'penalty':
-            return kl_mean
-
-    def train_critic(self, reward, state):
-        """
-        Update value network (critic)
-        :param reward: cumulative reward batch
-        :param state: state batch
-        :return: None
-        """
-        reward = np.array(reward, dtype=np.float32)
-        with tf.GradientTape() as tape:
-            advantage = reward - self.critic(state)
-            loss = tf.reduce_mean(tf.square(advantage))
-        grad = tape.gradient(loss, self.critic.trainable_weights)
-        self.critic_opt.apply_gradients(zip(grad, self.critic.trainable_weights))
-
-    def update(self):
-        """
-        Update parameters, with a KL-divergence constraint in the penalty variant
-        :return: None
-        """
-        global GLOBAL_UPDATE_COUNTER
-        while not COORD.should_stop():
-            if GLOBAL_EP < TRAIN_EPISODES:
-                UPDATE_EVENT.wait()  # wait until a batch of data is collected
-
-                data = [QUEUE.get() for _ in range(QUEUE.qsize())]  # collect data from all workers
-                s, a, r = zip(*data)
-                s = np.vstack(s).astype(np.float32)
-                a = np.vstack(a).astype(np.float32)
-                r = np.vstack(r).astype(np.float32)
-                mean, std = self.actor(s), tf.exp(self.actor.logstd)
-                pi = tfp.distributions.Normal(mean, std)
-                adv = r - self.critic(s)
-                # adv = (adv - adv.mean())/(adv.std()+1e-6)  # sometimes helpful
-
-                # update actor
-                if self.method == 'penalty':
-                    for _ in range(ACTOR_UPDATE_STEPS):
-                        kl = self.train_actor(s, a, adv, pi)
-                        if kl < self.kl_target / 1.5:
-                            self.lam /= 2
-                        elif kl > self.kl_target * 1.5:
-                            self.lam *= 2
-                else:
-                    for _ in range(ACTOR_UPDATE_STEPS):
-                        self.train_actor(s, a, adv, pi)
-
-                # update critic
-                for _ in range(CRITIC_UPDATE_STEPS):
-                    self.train_critic(r, s)
-
-                UPDATE_EVENT.clear()  # updating finished
-                GLOBAL_UPDATE_COUNTER = 0  # reset counter
-                ROLLING_EVENT.set()  # set roll-out available
-
-    def get_action(self, state, greedy=False):
-        """
-        Choose action
-        :param state: state
-        :param greedy: choose action greedy or not
-        :return: clipped action
-        """
-        state = state[np.newaxis, :].astype(np.float32)
-        mean, std = self.actor(state), tf.exp(self.actor.logstd)
-        if greedy:
-            action = mean[0]
-        else:
-            pi = tfp.distributions.Normal(mean, std)
-            action = tf.squeeze(pi.sample(1), axis=0)[0]  # sample an action
-        return np.clip(action, -self.action_bound, self.action_bound)
-
-    def save(self):
-        """
-        save trained weights
-        :return: None
-        """
path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - if not os.path.exists(path): - os.makedirs(path) - tl.files.save_weights_to_hdf5(os.path.join(path, 'actor.hdf5'), self.actor) - tl.files.save_weights_to_hdf5(os.path.join(path, 'critic.hdf5'), self.critic) - - def load(self): - """ - load trained weights - :return: None - """ - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - tl.files.load_hdf5_to_weights_in_order(os.path.join(path, 'actor.hdf5'), self.actor) - tl.files.load_hdf5_to_weights_in_order(os.path.join(path, 'critic.hdf5'), self.critic) - - -"""--------------------------------------------------------------""" - - -class Worker(object): - """ - Worker class for distributional running - """ - - def __init__(self, wid): - self.wid = wid - self.env = gym.make(ENV_ID).unwrapped - self.env.seed(wid * 100 + RANDOMSEED) - self.ppo = GLOBAL_PPO - - def work(self): - """ - Define a worker - :return: None - """ - global GLOBAL_EP, GLOBAL_RUNNING_R, GLOBAL_UPDATE_COUNTER - while not COORD.should_stop(): - s = self.env.reset() - ep_r = 0 - buffer_s, buffer_a, buffer_r = [], [], [] - for t in range(MAX_STEPS): - if not ROLLING_EVENT.is_set(): # while global PPO is updating - ROLLING_EVENT.wait() # wait until PPO is updated - buffer_s, buffer_a, buffer_r = [], [], [] # clear history buffer, use new policy to collect data - a = self.ppo.get_action(s) - s_, r, done, _ = self.env.step(a) - if RENDER and self.wid == 0: - self.env.render() - buffer_s.append(s) - buffer_a.append(a) - buffer_r.append(r) - s = s_ - ep_r += r - - GLOBAL_UPDATE_COUNTER += 1 # count to minimum batch size, no need to wait other workers - if t == MAX_STEPS - 1 or GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE: - # finish patyh - if done: - v_s_ = 0 - else: - v_s_ = self.ppo.critic(np.array([s_], np.float32))[0][0] - discounted_r = [] # compute discounted reward - for r in buffer_r[::-1]: - v_s_ = r + GAMMA * v_s_ - discounted_r.append(v_s_) - discounted_r.reverse() - buffer_r = np.array(discounted_r)[:, np.newaxis] - QUEUE.put([buffer_s, buffer_a, buffer_r]) # put data in the queue - buffer_s, buffer_a, buffer_r = [], [], [] - - # update - if GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE: - ROLLING_EVENT.clear() # stop collecting data - UPDATE_EVENT.set() # globalPPO update - - # stop training - if GLOBAL_EP >= TRAIN_EPISODES: - COORD.request_stop() - break - - print( - 'Training | Episode: {}/{} | Worker: {} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - GLOBAL_EP + 1, TRAIN_EPISODES, self.wid, ep_r, time.time() - T0 - ) - ) - # record reward changes, plot later - if len(GLOBAL_RUNNING_R) == 0: - GLOBAL_RUNNING_R.append(ep_r) - else: - GLOBAL_RUNNING_R.append(GLOBAL_RUNNING_R[-1] * 0.9 + ep_r * 0.1) - GLOBAL_EP += 1 - - -if __name__ == '__main__': - - # reproducible - np.random.seed(RANDOMSEED) - tf.random.set_seed(RANDOMSEED) - - env = gym.make(ENV_ID) - state_dim = env.observation_space.shape[0] - action_dim = env.action_space.shape[0] - action_bound = env.action_space.high - env.close() - - GLOBAL_PPO = PPO(state_dim, action_dim, action_bound) - T0 = time.time() - if args.train: # train - UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event() - UPDATE_EVENT.clear() # not update now - ROLLING_EVENT.set() # start to roll out - workers = [Worker(wid=i) for i in range(N_WORKER)] - - GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0 - GLOBAL_RUNNING_R = [] - COORD = tf.train.Coordinator() - QUEUE = queue.Queue() # workers putting data in this queue - threads = [] - for worker in workers: # worker 
threads - t = threading.Thread(target=worker.work) - t.start() # training - threads.append(t) - # add a PPO updating thread - threads.append(threading.Thread(target=GLOBAL_PPO.update)) - threads[-1].start() - COORD.join(threads) - - GLOBAL_PPO.save() - - plt.plot(GLOBAL_RUNNING_R) - if not os.path.exists('image'): - os.makedirs('image') - plt.savefig(os.path.join('image', '_'.join([ALG_NAME, ENV_ID]))) - - # test - if args.test: - GLOBAL_PPO.load() - for episode in range(TEST_EPISODES): - state = env.reset() - episode_reward = 0 - for step in range(MAX_STEPS): - env.render() - state, reward, done, info = env.step(GLOBAL_PPO.get_action(state, greedy=True)) - episode_reward += reward - if done: - break - print( - 'Testing | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - episode + 1, TEST_EPISODES, episode_reward, - time.time() - T0)) diff --git a/examples/reinforcement_learning/tutorial_DQN.py b/examples/reinforcement_learning/tutorial_DQN.py deleted file mode 100644 index 5fdabdeb2..000000000 --- a/examples/reinforcement_learning/tutorial_DQN.py +++ /dev/null @@ -1,182 +0,0 @@ -""" -Deep Q-Network Q(a, s) ------------------------ -TD Learning, Off-Policy, e-Greedy Exploration (GLIE). -Q(S, A) <- Q(S, A) + alpha * (R + lambda * Q(newS, newA) - Q(S, A)) -delta_w = R + lambda * Q(newS, newA) -See David Silver RL Tutorial Lecture 5 - Q-Learning for more details. -Reference ----------- -original paper: https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf -EN: https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0#.5m3361vlw -CN: https://zhuanlan.zhihu.com/p/25710327 -Note: Policy Network has been proved to be better than Q-Learning, see tutorial_atari_pong.py -Environment ------------ -# The FrozenLake v0 environment -https://gym.openai.com/envs/FrozenLake-v0 -The agent controls the movement of a character in a grid world. Some tiles of -the grid are walkable, and others lead to the agent falling into the water. -Additionally, the movement direction of the agent is uncertain and only partially -depends on the chosen direction. The agent is rewarded for finding a walkable -path to a goal tile. -SFFF (S: starting point, safe) -FHFH (F: frozen surface, safe) -FFFH (H: hole, fall to your doom) -HFFG (G: goal, where the frisbee is located) -The episode ends when you reach the goal or fall in a hole. You receive a reward -of 1 if you reach the goal, and zero otherwise. 
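A worked instance of the update rule quoted above (numbers hypothetical): with
alpha = 0.1, lambda = 0.99, Q(S, A) = 0.5, R = 1 and max Q(newS, .) = 0.8,

    Q(S, A) <- 0.5 + 0.1 * (1 + 0.99 * 0.8 - 0.5) = 0.6292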
-Prerequisites
---------------
-tensorflow>=2.0.0a0
-tensorlayer>=2.0.0
-To run
--------
-python tutorial_DQN.py --train/test
-"""
-import argparse
-import os
-import time
-
-import gym
-import matplotlib.pyplot as plt
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-
-# add arguments in command --train/test
-parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
-parser.add_argument('--train', dest='train', action='store_true', default=True)
-parser.add_argument('--test', dest='test', action='store_true', default=True)
-args = parser.parse_args()
-
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-#####################  hyper parameters  ####################
-env_id = 'FrozenLake-v0'
-alg_name = 'DQN'
-lambd = .99  # decay factor
-e = 0.1  # e-greedy exploration; the larger, the more random
-num_episodes = 10000
-render = False  # display the game environment
-
-#####################  DQN  ##########################
-
-
-def to_one_hot(i, n_classes=None):
-    a = np.zeros(n_classes, 'uint8')
-    a[i] = 1
-    return a
-
-
-## Define the Q-network q(a, s) that outputs the Q-values of the 4 actions for a given state, i.e. the Action-Value Function.
-# state encoding: the 4x4 grid is represented by a one-hot vector of 16 integers.
-def get_model(inputs_shape):
-    ni = tl.layers.Input(inputs_shape, name='observation')
-    nn = tl.layers.Dense(4, act=None, W_init=tf.random_uniform_initializer(0, 0.01), b_init=None, name='q_a_s')(ni)
-    return tl.models.Model(inputs=ni, outputs=nn, name="Q-Network")
-
-
-def save_ckpt(model):  # save trained weights
-    path = os.path.join('model', '_'.join([alg_name, env_id]))
-    if not os.path.exists(path):
-        os.makedirs(path)
-    tl.files.save_weights_to_hdf5(os.path.join(path, 'dqn_model.hdf5'), model)
-
-
-def load_ckpt(model):  # load trained weights
-    path = os.path.join('model', '_'.join([alg_name, env_id]))
-    tl.files.load_hdf5_to_weights_in_order(os.path.join(path, 'dqn_model.hdf5'), model)
-
-
-if __name__ == '__main__':
-
-    qnetwork = get_model([None, 16])
-    qnetwork.train()
-    train_weights = qnetwork.trainable_weights
-
-    optimizer = tf.optimizers.SGD(learning_rate=0.1)
-    env = gym.make(env_id)
-
-    t0 = time.time()
-    if args.train:
-        all_episode_reward = []
-        for i in range(num_episodes):
-            ## Reset environment and get first new observation
-            s = env.reset()  # observation is state, integer 0 ~ 15
-            rAll = 0
-            if render: env.render()
-            for j in range(99):  # step index, maximum step is 99
-                ## Choose an action greedily (with e chance of random action) from the Q-network
-                allQ = qnetwork(np.asarray([to_one_hot(s, 16)], dtype=np.float32)).numpy()
-                a = np.argmax(allQ, 1)
-
-                ## e-greedy exploration: sample a random action
-                if np.random.rand(1) < e:
-                    a[0] = env.action_space.sample()
-                ## Get new state and reward from environment
-                s1, r, d, _ = env.step(a[0])
-                if render: env.render()
-                ## Obtain the Q' values by feeding the new state through our network
-                Q1 = qnetwork(np.asarray([to_one_hot(s1, 16)], dtype=np.float32)).numpy()
-
-                ## Obtain maxQ' and set our target value for the chosen action.
-                maxQ1 = np.max(Q1)  # in Q-Learning, the policy is greedy, so we use "max" to select the next action
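# (a worked instance of the target assignment below; values hypothetical)
# with lambd = 0.99, r = 0 and maxQ1 = 0.5, only the chosen action's entry moves:
#     allQ    = [[0.1, 0.3,   0.2, 0.0]], a = [1]
#     targetQ = [[0.1, 0.495, 0.2, 0.0]]   since r + lambd * maxQ1 = 0.495
# the regression then nudges Q(s, a=1) toward 0.495 and leaves the other actions unchanged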
-                targetQ = allQ
-                targetQ[0, a[0]] = r + lambd * maxQ1
-                ## Train network using target and predicted Q values
-                # this is not the true target Q value, only an estimate,
-                # but compare with the Q-Learning update formula:
-                #    Q'(s,a) <- Q(s,a) + alpha(r + lambd * maxQ(s',a') - Q(s, a))
-                # minimizing |r + lambd * maxQ(s',a') - Q(s, a)|^2 pushes Q(s, a) toward the target
-                with tf.GradientTape() as tape:
-                    _qvalues = qnetwork(np.asarray([to_one_hot(s, 16)], dtype=np.float32))
-                    _loss = tl.cost.mean_squared_error(targetQ, _qvalues, is_mean=False)
-                grad = tape.gradient(_loss, train_weights)
-                optimizer.apply_gradients(zip(grad, train_weights))
-
-                rAll += r
-                s = s1
-                ## Reduce the chance of random actions once an episode is done.
-                if d:
-                    e = 1. / ((i / 50) + 10)  # reduce e; GLIE: Greedy in the Limit with Infinite Exploration
-                    break
-
-            ## note that the rewards here include those earned by random exploratory actions
-            print('Training | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}' \
-                  .format(i, num_episodes, rAll, time.time() - t0))
-
-            if i == 0:
-                all_episode_reward.append(rAll)
-            else:
-                all_episode_reward.append(all_episode_reward[-1] * 0.9 + rAll * 0.1)
-
-        save_ckpt(qnetwork)  # save model
-        plt.plot(all_episode_reward)
-        if not os.path.exists('image'):
-            os.makedirs('image')
-        plt.savefig(os.path.join('image', '_'.join([alg_name, env_id])))
-
-    if args.test:
-        load_ckpt(qnetwork)  # load model
-        for i in range(num_episodes):
-            ## Reset environment and get first new observation
-            s = env.reset()  # observation is state, integer 0 ~ 15
-            rAll = 0
-            if render: env.render()
-            for j in range(99):  # step index, maximum step is 99
-                ## Choose an action greedily from the Q-network
-                allQ = qnetwork(np.asarray([to_one_hot(s, 16)], dtype=np.float32)).numpy()
-                a = np.argmax(allQ, 1)  # no epsilon, only greedy for testing
-
-                ## Get new state and reward from environment
-                s1, r, d, _ = env.step(a[0])
-                rAll += r
-                s = s1
-                if render: env.render()
-                ## Episode ends when done.
-                if d: break
-
-            print('Testing | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}' \
-                  .format(i, num_episodes, rAll, time.time() - t0))
diff --git a/examples/reinforcement_learning/tutorial_DQN_variants.py b/examples/reinforcement_learning/tutorial_DQN_variants.py
deleted file mode 100644
index 5195ef61f..000000000
--- a/examples/reinforcement_learning/tutorial_DQN_variants.py
+++ /dev/null
@@ -1,433 +0,0 @@
-"""
-DQN and its variants
-------------------------
-We implement Double DQN, Dueling DQN and Noisy DQN here.
-The max operator in standard DQN uses the same values both to select and to
-evaluate an action:
-Q(s_t, a_t) = R_{t+1} + \gamma * max_{a}Q_{tar}(s_{t+1}, a).
-Double DQN proposes the following evaluation to address the overestimation
-problem of the max operator:
-Q(s_t, a_t) = R_{t+1} + \gamma * Q_{tar}(s_{t+1}, max_{a}Q(s_{t+1}, a)).
-Dueling DQN uses a dueling architecture in which the state value and the
-advantage of each action are estimated separately.
-Noisy DQN explores by adding parameter noise.
-Reference:
-------------------------
-1. Double DQN
-   Van Hasselt H, Guez A, Silver D. Deep reinforcement learning with double
-   q-learning[C]//Thirtieth AAAI Conference on Artificial Intelligence. 2016.
-2. Dueling DQN
-   Wang Z, Schaul T, Hessel M, et al. Dueling network architectures for deep
-   reinforcement learning[J]. arXiv preprint arXiv:1511.06581, 2015.
-3. Noisy DQN
-   Plappert M, Houthooft R, Dhariwal P, et al.
Parameter space noise for - exploration[J]. arXiv preprint arXiv:1706.01905, 2017. -Environment: ------------------------- -Cartpole and Pong in OpenAI Gym -Requirements: ------------------------- -tensorflow>=2.0.0a0 -tensorlayer>=2.0.0 -To run: ------------------------- -python tutorial_DQN_variantes.py --mode=train -python tutorial_DQN_variantes.py --mode=test --save_path=dqn_variants/8000.npz -""" -import argparse -import os -import random -import time - -import gym -import matplotlib.pyplot as plt -import numpy as np -import tensorflow as tf - -import tensorlayer as tl - -parser = argparse.ArgumentParser() -parser.add_argument('--train', dest='train', action='store_true', default=True) -parser.add_argument('--test', dest='test', action='store_true', default=True) -parser.add_argument( - '--save_path', default=None, help='folder to save if mode == train else model path,' - 'qnet will be saved once target net update' -) -parser.add_argument('--seed', help='random seed', type=int, default=0) -parser.add_argument('--env_id', default='CartPole-v0', help='CartPole-v0 or PongNoFrameskip-v4') -parser.add_argument('--noisy_scale', type=float, default=1e-2) -parser.add_argument('--disable_double', action='store_true', default=False) -parser.add_argument('--disable_dueling', action='store_true', default=False) -args = parser.parse_args() - -random.seed(args.seed) -np.random.seed(args.seed) -tf.random.set_seed(args.seed) # reproducible - -env_id = args.env_id -env = gym.make(env_id) -env.seed(args.seed) -noise_scale = args.noisy_scale -double = not args.disable_double -dueling = not args.disable_dueling - -alg_name = 'DQN' -if dueling: alg_name = 'Dueling_' + alg_name -if double: alg_name = 'Double_' + alg_name -if noise_scale != 0: alg_name = 'Noisy_' + alg_name -print(alg_name) -# #################### hyper parameters #################### -if env_id == 'CartPole-v0': - qnet_type = 'MLP' - number_timesteps = 10000 # total number of time steps to train on - explore_timesteps = 100 - # epsilon-greedy schedule, final exploit prob is 0.99 - epsilon = lambda i_iter: 1 - 0.99 * min(1, i_iter / explore_timesteps) - lr = 5e-3 # learning rate - buffer_size = 1000 # replay buffer size - target_q_update_freq = 50 # how frequency target q net update - ob_scale = 1.0 # scale observations - clipnorm = None -else: - # reward will increase obviously after 1e5 time steps - qnet_type = 'CNN' - number_timesteps = int(1e6) # total number of time steps to train on - explore_timesteps = 1e5 - # epsilon-greedy schedule, final exploit prob is 0.99 - epsilon = lambda i_iter: 1 - 0.99 * min(1, i_iter / explore_timesteps) - lr = 1e-4 # learning rate - buffer_size = 10000 # replay buffer size - target_q_update_freq = 200 # how frequency target q net update - ob_scale = 1.0 / 255 # scale observations - clipnorm = 10 - -in_dim = env.observation_space.shape -out_dim = env.action_space.n -reward_gamma = 0.99 # reward discount -batch_size = 32 # batch size for sampling from replay buffer -warm_start = buffer_size / 10 # sample times befor learning -noise_update_freq = 50 # how frequency param noise net update - - -# ############################## Network #################################### -class MLP(tl.models.Model): - - def __init__(self, name): - super(MLP, self).__init__(name=name) - self.h1 = tl.layers.Dense(64, tf.nn.tanh, in_channels=in_dim[0]) - self.qvalue = tl.layers.Dense(out_dim, in_channels=64, name='q', W_init=tf.initializers.GlorotUniform()) - self.svalue = tl.layers.Dense(1, in_channels=64, name='s', 
W_init=tf.initializers.GlorotUniform()) - self.noise_scale = 0 - - def forward(self, ni): - feature = self.h1(ni) - - # apply noise to all linear layer - if self.noise_scale != 0: - noises = [] - for layer in [self.qvalue, self.svalue]: - for var in layer.trainable_weights: - noise = tf.random.normal(tf.shape(var), 0, self.noise_scale) - noises.append(noise) - var.assign_add(noise) - - qvalue = self.qvalue(feature) - svalue = self.svalue(feature) - - if self.noise_scale != 0: - idx = 0 - for layer in [self.qvalue, self.svalue]: - for var in layer.trainable_weights: - var.assign_sub(noises[idx]) - idx += 1 - - if dueling: - # dueling network - return svalue + qvalue - tf.reduce_mean(qvalue, 1, keepdims=True) - else: - return qvalue - - -class CNN(tl.models.Model): - - def __init__(self, name): - super(CNN, self).__init__(name=name) - h, w, in_channels = in_dim - dense_in_channels = 64 * ((h - 28) // 8) * ((w - 28) // 8) - self.conv1 = tl.layers.Conv2d( - 32, (8, 8), (4, 4), tf.nn.relu, 'VALID', in_channels=in_channels, name='conv2d_1', - W_init=tf.initializers.GlorotUniform() - ) - self.conv2 = tl.layers.Conv2d( - 64, (4, 4), (2, 2), tf.nn.relu, 'VALID', in_channels=32, name='conv2d_2', - W_init=tf.initializers.GlorotUniform() - ) - self.conv3 = tl.layers.Conv2d( - 64, (3, 3), (1, 1), tf.nn.relu, 'VALID', in_channels=64, name='conv2d_3', - W_init=tf.initializers.GlorotUniform() - ) - self.flatten = tl.layers.Flatten(name='flatten') - self.preq = tl.layers.Dense( - 256, tf.nn.relu, in_channels=dense_in_channels, name='pre_q', W_init=tf.initializers.GlorotUniform() - ) - self.qvalue = tl.layers.Dense(out_dim, in_channels=256, name='q', W_init=tf.initializers.GlorotUniform()) - self.pres = tl.layers.Dense( - 256, tf.nn.relu, in_channels=dense_in_channels, name='pre_s', W_init=tf.initializers.GlorotUniform() - ) - self.svalue = tl.layers.Dense(1, in_channels=256, name='state', W_init=tf.initializers.GlorotUniform()) - self.noise_scale = 0 - - def forward(self, ni): - feature = self.flatten(self.conv3(self.conv2(self.conv1(ni)))) - - # apply noise to all linear layer - if self.noise_scale != 0: - noises = [] - for layer in [self.preq, self.qvalue, self.pres, self.svalue]: - for var in layer.trainable_weights: - noise = tf.random.normal(tf.shape(var), 0, self.noise_scale) - noises.append(noise) - var.assign_add(noise) - - qvalue = self.qvalue(self.preq(feature)) - svalue = self.svalue(self.pres(feature)) - - if self.noise_scale != 0: - idx = 0 - for layer in [self.preq, self.qvalue, self.pres, self.svalue]: - for var in layer.trainable_weights: - var.assign_sub(noises[idx]) - idx += 1 - - if dueling: - # dueling network - return svalue + qvalue - tf.reduce_mean(qvalue, 1, keepdims=True) - else: - return qvalue - - -# ############################## Replay #################################### -class ReplayBuffer(object): - - def __init__(self, size): - self._storage = [] - self._maxsize = size - self._next_idx = 0 - - def __len__(self): - return len(self._storage) - - def add(self, *args): - if self._next_idx >= len(self._storage): - self._storage.append(args) - else: - self._storage[self._next_idx] = args - self._next_idx = (self._next_idx + 1) % self._maxsize - - def _encode_sample(self, idxes): - b_o, b_a, b_r, b_o_, b_d = [], [], [], [], [] - for i in idxes: - o, a, r, o_, d = self._storage[i] - b_o.append(o) - b_a.append(a) - b_r.append(r) - b_o_.append(o_) - b_d.append(d) - return ( - np.stack(b_o).astype('float32') * ob_scale, - np.stack(b_a).astype('int32'), - 
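# A small numeric check of the dueling aggregation used in both forward()
# methods above (values hypothetical): Q = V + A - mean(A) keeps the relative
# ordering of the per-action stream while centering it around the state value.
#     V(s) = 1.0, A(s, .) = [0.0, 4.0], mean(A) = 2.0  ->  Q(s, .) = [-1.0, 3.0]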
np.stack(b_r).astype('float32'), - np.stack(b_o_).astype('float32') * ob_scale, - np.stack(b_d).astype('float32'), - ) - - def sample(self, batch_size): - indexes = range(len(self._storage)) - idxes = [random.choice(indexes) for _ in range(batch_size)] - return self._encode_sample(idxes) - - -# ############################# Functions ################################### -def huber_loss(x): - """Loss function for value""" - return tf.where(tf.abs(x) < 1, tf.square(x) * 0.5, tf.abs(x) - 0.5) - - -def sync(net, net_tar): - """Copy q network to target q network""" - for var, var_tar in zip(net.trainable_weights, net_tar.trainable_weights): - var_tar.assign(var) - - -def log_softmax(x, dim): - temp = x - np.max(x, dim, keepdims=True) - return temp - np.log(np.exp(temp).sum(dim, keepdims=True)) - - -def softmax(x, dim): - temp = np.exp(x - np.max(x, dim, keepdims=True)) - return temp / temp.sum(dim, keepdims=True) - - -# ############################### DQN ##################################### -class DQN(object): - - def __init__(self): - model = MLP if qnet_type == 'MLP' else CNN - self.qnet = model('q') - if args.train: - self.qnet.train() - self.targetqnet = model('targetq') - self.targetqnet.infer() - sync(self.qnet, self.targetqnet) - else: - self.qnet.infer() - self.load(args.save_path) - self.niter = 0 - if clipnorm is not None: - self.optimizer = tf.optimizers.Adam(learning_rate=lr, clipnorm=clipnorm) - else: - self.optimizer = tf.optimizers.Adam(learning_rate=lr) - self.noise_scale = noise_scale - - def get_action(self, obv): - eps = epsilon(self.niter) - if args.train: - if random.random() < eps: - return int(random.random() * out_dim) - obv = np.expand_dims(obv, 0).astype('float32') * ob_scale - if self.niter < explore_timesteps: - self.qnet.noise_scale = self.noise_scale - q_ptb = self._qvalues_func(obv).numpy() - self.qnet.noise_scale = 0 - if i % noise_update_freq == 0: - q = self._qvalues_func(obv).numpy() - kl_ptb = (log_softmax(q, 1) - log_softmax(q_ptb, 1)) - kl_ptb = np.sum(kl_ptb * softmax(q, 1), 1).mean() - kl_explore = -np.log(1 - eps + eps / out_dim) - if kl_ptb < kl_explore: - self.noise_scale *= 1.01 - else: - self.noise_scale /= 1.01 - return q_ptb.argmax(1)[0] - else: - return self._qvalues_func(obv).numpy().argmax(1)[0] - else: - obv = np.expand_dims(obv, 0).astype('float32') * ob_scale - return self._qvalues_func(obv).numpy().argmax(1)[0] - - @tf.function - def _qvalues_func(self, obv): - return self.qnet(obv) - - def train(self, b_o, b_a, b_r, b_o_, b_d): - self._train_func(b_o, b_a, b_r, b_o_, b_d) - - self.niter += 1 - if self.niter % target_q_update_freq == 0: - sync(self.qnet, self.targetqnet) - self.save(args.save_path) - - @tf.function - def _train_func(self, b_o, b_a, b_r, b_o_, b_d): - with tf.GradientTape() as tape: - td_errors = self._tderror_func(b_o, b_a, b_r, b_o_, b_d) - loss = tf.reduce_mean(huber_loss(td_errors)) - - grad = tape.gradient(loss, self.qnet.trainable_weights) - self.optimizer.apply_gradients(zip(grad, self.qnet.trainable_weights)) - - return td_errors - - @tf.function - def _tderror_func(self, b_o, b_a, b_r, b_o_, b_d): - if double: - b_a_ = tf.one_hot(tf.argmax(self.qnet(b_o_), 1), out_dim) - b_q_ = (1 - b_d) * tf.reduce_sum(self.targetqnet(b_o_) * b_a_, 1) - else: - b_q_ = (1 - b_d) * tf.reduce_max(self.targetqnet(b_o_), 1) - - b_q = tf.reduce_sum(self.qnet(b_o) * tf.one_hot(b_a, out_dim), 1) - return b_q - (b_r + reward_gamma * b_q_) - - def save(self, path): - if path is None: - path = os.path.join('model', '_'.join([alg_name, 
env_id])) - if not os.path.exists(path): - os.makedirs(path) - tl.files.save_weights_to_hdf5(os.path.join(path, 'q_net.hdf5'), self.qnet) - - def load(self, path): - if path is None: - path = os.path.join('model', '_'.join([alg_name, env_id])) - tl.files.load_hdf5_to_weights_in_order(os.path.join(path, 'q_net.hdf5'), self.qnet) - - -# ############################# Trainer ################################### -if __name__ == '__main__': - dqn = DQN() - t0 = time.time() - if args.train: - buffer = ReplayBuffer(buffer_size) - nepisode = 0 - all_episode_reward = [] - for i in range(1, number_timesteps + 1): - o = env.reset() - episode_reward = 0 - while True: - a = dqn.get_action(o) - - # execute action and feed to replay buffer - # note that `_` tail in var name means next - o_, r, done, info = env.step(a) - buffer.add(o, a, r, o_, done) - episode_reward += r - - if i >= warm_start: - transitions = buffer.sample(batch_size) - dqn.train(*transitions) - - if done: - break - else: - o = o_ - - if nepisode == 0: - all_episode_reward.append(episode_reward) - else: - all_episode_reward.append(all_episode_reward[-1] * 0.9 + episode_reward * 0.1) - nepisode += 1 - print( - 'Training | Episode: {} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - nepisode, episode_reward, - time.time() - t0 - ) - ) # episode num starts from 1 in print - - dqn.save(args.save_path) - plt.plot(all_episode_reward) - if not os.path.exists('image'): - os.makedirs('image') - plt.savefig(os.path.join('image', '_'.join([alg_name, env_id]))) - - if args.test: - nepisode = 0 - for i in range(1, number_timesteps + 1): - o = env.reset() - episode_reward = 0 - while True: - env.render() - a = dqn.get_action(o) - o_, r, done, info = env.step(a) - episode_reward += r - if done: - break - else: - o = o_ - nepisode += 1 - print( - 'Testing | Episode: {} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - nepisode, episode_reward, - time.time() - t0 - ) - ) diff --git a/examples/reinforcement_learning/tutorial_PG.py b/examples/reinforcement_learning/tutorial_PG.py deleted file mode 100644 index 776cd6ac4..000000000 --- a/examples/reinforcement_learning/tutorial_PG.py +++ /dev/null @@ -1,233 +0,0 @@ -""" -Vanilla Policy Gradient(VPG or REINFORCE) ------------------------------------------ -The policy gradient algorithm works by updating policy parameters via stochastic gradient ascent on policy performance. -It's an on-policy algorithm can be used for environments with either discrete or continuous action spaces. -Here is an example on discrete action space game CartPole-v0. -To apply it on continuous action space, you need to change the last softmax layer and the get_action function. - -Reference ---------- -Cookbook: Barto A G, Sutton R S. Reinforcement Learning: An Introduction[J]. 1998. 
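(For intuition, the discounted returns computed later in _discount_and_norm_rewards
work back to front: with gamma = 0.99 and hypothetical rewards [1, 1, 1], the
returns are [2.9701, 1.99, 1.0] before normalization.)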
-MorvanZhou's tutorial page: https://morvanzhou.github.io/tutorials/ - -Environment ------------ -Openai Gym CartPole-v0, discrete action space - -Prerequisites --------------- -tensorflow >=2.0.0a0 -tensorflow-probability 0.6.0 -tensorlayer >=2.0.0 - -To run ------- -python tutorial_PG.py --train/test - -""" -import argparse -import os -import time - -import gym -import matplotlib.pyplot as plt -import numpy as np -import tensorflow as tf - -import tensorlayer as tl - -parser = argparse.ArgumentParser(description='Train or test neural net motor controller.') -parser.add_argument('--train', dest='train', action='store_true', default=False) -parser.add_argument('--test', dest='test', action='store_true', default=True) -args = parser.parse_args() - -##################### hyper parameters #################### - -ENV_ID = 'CartPole-v1' # environment id -RANDOM_SEED = 1 # random seed, can be either an int number or None -RENDER = False # render while training - -ALG_NAME = 'PG' -TRAIN_EPISODES = 200 -TEST_EPISODES = 10 -MAX_STEPS = 500 - -############################### PG #################################### - - -class PolicyGradient: - """ - PG class - """ - - def __init__(self, state_dim, action_num, learning_rate=0.02, gamma=0.99): - self.gamma = gamma - - self.state_buffer, self.action_buffer, self.reward_buffer = [], [], [] - - input_layer = tl.layers.Input([None, state_dim], tf.float32) - layer = tl.layers.Dense( - n_units=30, act=tf.nn.tanh, W_init=tf.random_normal_initializer(mean=0, stddev=0.3), - b_init=tf.constant_initializer(0.1) - )(input_layer) - all_act = tl.layers.Dense( - n_units=action_num, act=None, W_init=tf.random_normal_initializer(mean=0, stddev=0.3), - b_init=tf.constant_initializer(0.1) - )(layer) - - self.model = tl.models.Model(inputs=input_layer, outputs=all_act) - self.model.train() - self.optimizer = tf.optimizers.Adam(learning_rate) - - def get_action(self, s, greedy=False): - """ - choose action with probabilities. 
- :param s: state - :param greedy: choose action greedy or not - :return: act - """ - _logits = self.model(np.array([s], np.float32)) - _probs = tf.nn.softmax(_logits).numpy() - if greedy: - return np.argmax(_probs.ravel()) - return tl.rein.choice_action_by_probs(_probs.ravel()) - - def store_transition(self, s, a, r): - """ - store data in memory buffer - :param s: state - :param a: act - :param r: reward - :return: - """ - self.state_buffer.append(np.array([s], np.float32)) - self.action_buffer.append(a) - self.reward_buffer.append(r) - - def learn(self): - """ - update policy parameters via stochastic gradient ascent - :return: None - """ - discounted_reward_buffer_norm = self._discount_and_norm_rewards() - - with tf.GradientTape() as tape: - _logits = self.model(np.vstack(self.state_buffer)) - neg_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits( - logits=_logits, labels=np.array(self.action_buffer) - ) - loss = tf.reduce_mean(neg_log_prob * discounted_reward_buffer_norm) - - grad = tape.gradient(loss, self.model.trainable_weights) - self.optimizer.apply_gradients(zip(grad, self.model.trainable_weights)) - - self.state_buffer, self.action_buffer, self.reward_buffer = [], [], [] # empty episode data - return discounted_reward_buffer_norm - - def _discount_and_norm_rewards(self): - """ - compute discount_and_norm_rewards - :return: discount_and_norm_rewards - """ - # discount episode rewards - discounted_reward_buffer = np.zeros_like(self.reward_buffer) - running_add = 0 - for t in reversed(range(0, len(self.reward_buffer))): - running_add = running_add * self.gamma + self.reward_buffer[t] - discounted_reward_buffer[t] = running_add - - # normalize episode rewards - discounted_reward_buffer -= np.mean(discounted_reward_buffer) - discounted_reward_buffer /= np.std(discounted_reward_buffer) - return discounted_reward_buffer - - def save(self): - """ - save trained weights - :return: None - """ - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - if not os.path.exists(path): - os.makedirs(path) - tl.files.save_weights_to_hdf5(os.path.join(path, 'pg_policy.hdf5'), self.model) - - def load(self): - """ - load trained weights - :return: None - """ - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - tl.files.load_hdf5_to_weights_in_order(os.path.join(path, 'pg_policy.hdf5'), self.model) - - -if __name__ == '__main__': - env = gym.make(ENV_ID).unwrapped - - # reproducible - np.random.seed(RANDOM_SEED) - tf.random.set_seed(RANDOM_SEED) - env.seed(RANDOM_SEED) - - agent = PolicyGradient( - action_num=env.action_space.n, - state_dim=env.observation_space.shape[0], - ) - - t0 = time.time() - - if args.train: - all_episode_reward = [] - for episode in range(TRAIN_EPISODES): - - state = env.reset() - episode_reward = 0 - - for step in range(MAX_STEPS): # in one episode - if RENDER: - env.render() - - action = agent.get_action(state) - next_state, reward, done, info = env.step(action) - agent.store_transition(state, action, reward) - state = next_state - episode_reward += reward - if done: - break - agent.learn() - print( - 'Training | Episode: {}/{} | Episode Reward: {:.0f} | Running Time: {:.4f}'.format( - episode + 1, TRAIN_EPISODES, episode_reward, - time.time() - t0 - ) - ) - - if episode == 0: - all_episode_reward.append(episode_reward) - else: - all_episode_reward.append(all_episode_reward[-1] * 0.9 + episode_reward * 0.1) - - agent.save() - plt.plot(all_episode_reward) - if not os.path.exists('image'): - os.makedirs('image') - plt.savefig(os.path.join('image', 
'_'.join([ALG_NAME, ENV_ID]))) - - if args.test: - # test - agent.load() - for episode in range(TEST_EPISODES): - state = env.reset() - episode_reward = 0 - for step in range(MAX_STEPS): - env.render() - state, reward, done, info = env.step(agent.get_action(state, True)) - episode_reward += reward - if done: - break - print( - 'Testing | Episode: {}/{} | Episode Reward: {:.0f} | Running Time: {:.4f}'.format( - episode + 1, TEST_EPISODES, episode_reward, - time.time() - t0 - ) - ) diff --git a/examples/reinforcement_learning/tutorial_PPO.py b/examples/reinforcement_learning/tutorial_PPO.py deleted file mode 100644 index 82d20d2e3..000000000 --- a/examples/reinforcement_learning/tutorial_PPO.py +++ /dev/null @@ -1,322 +0,0 @@ -""" -Proximal Policy Optimization (PPO) ----------------------------- -A simple version of Proximal Policy Optimization (PPO) using single thread. -PPO is a family of first-order methods that use a few other tricks to keep new policies close to old. -PPO methods are significantly simpler to implement, and empirically seem to perform at least as well as TRPO. -Reference ---------- -Proximal Policy Optimization Algorithms, Schulman et al. 2017 -High Dimensional Continuous Control Using Generalized Advantage Estimation, Schulman et al. 2016 -Emergence of Locomotion Behaviours in Rich Environments, Heess et al. 2017 -MorvanZhou's tutorial page: https://morvanzhou.github.io/tutorials -Environment ------------ -Openai Gym Pendulum-v0, continual action space -Prerequisites --------------- -tensorflow >=2.0.0a0 -tensorflow-probability 0.6.0 -tensorlayer >=2.0.0 -To run ------- -python tutorial_PPO.py --train/test -""" -import argparse -import os -import time - -import gym -import matplotlib.pyplot as plt -import numpy as np -import tensorflow as tf -import tensorflow_probability as tfp - -import tensorlayer as tl - -parser = argparse.ArgumentParser(description='Train or test neural net motor controller.') -parser.add_argument('--train', dest='train', action='store_true', default=False) -parser.add_argument('--test', dest='test', action='store_true', default=True) -args = parser.parse_args() - -##################### hyper parameters #################### - -ENV_ID = 'Pendulum-v0' # environment id -RANDOM_SEED = 1 # random seed -RENDER = False # render while training - -ALG_NAME = 'PPO' -TRAIN_EPISODES = 1000 # total number of episodes for training -TEST_EPISODES = 10 # total number of episodes for testing -MAX_STEPS = 200 # total number of steps for each episode -GAMMA = 0.9 # reward discount -LR_A = 0.0001 # learning rate for actor -LR_C = 0.0002 # learning rate for critic -BATCH_SIZE = 32 # update batch size -ACTOR_UPDATE_STEPS = 10 # actor update steps -CRITIC_UPDATE_STEPS = 10 # critic update steps - -# ppo-penalty parameters -KL_TARGET = 0.01 -LAM = 0.5 - -# ppo-clip parameters -EPSILON = 0.2 - - -############################### PPO #################################### - - -class PPO(object): - """ - PPO class - """ - def __init__(self, state_dim, action_dim, action_bound, method='clip'): - # critic - with tf.name_scope('critic'): - inputs = tl.layers.Input([None, state_dim], tf.float32, 'state') - layer = tl.layers.Dense(64, tf.nn.relu)(inputs) - layer = tl.layers.Dense(64, tf.nn.relu)(layer) - v = tl.layers.Dense(1)(layer) - self.critic = tl.models.Model(inputs, v) - self.critic.train() - - # actor - with tf.name_scope('actor'): - inputs = tl.layers.Input([None, state_dim], tf.float32, 'state') - layer = tl.layers.Dense(64, tf.nn.relu)(inputs) - layer = 
tl.layers.Dense(64, tf.nn.relu)(layer)
-            a = tl.layers.Dense(action_dim, tf.nn.tanh)(layer)
-            mean = tl.layers.Lambda(lambda x: x * action_bound, name='lambda')(a)
-            logstd = tf.Variable(np.zeros(action_dim, dtype=np.float32))
-            self.actor = tl.models.Model(inputs, mean)
-            self.actor.trainable_weights.append(logstd)
-            self.actor.logstd = logstd
-            self.actor.train()
-
-        self.actor_opt = tf.optimizers.Adam(LR_A)
-        self.critic_opt = tf.optimizers.Adam(LR_C)
-
-        self.method = method
-        if method == 'penalty':
-            self.kl_target = KL_TARGET
-            self.lam = LAM
-        elif method == 'clip':
-            self.epsilon = EPSILON
-
-        self.state_buffer, self.action_buffer = [], []
-        self.reward_buffer, self.cumulative_reward_buffer = [], []
-        self.action_bound = action_bound
-
-    def train_actor(self, state, action, adv, old_pi):
-        """
-        Update the policy network
-        :param state: state batch
-        :param action: action batch
-        :param adv: advantage batch
-        :param old_pi: old pi distribution
-        :return: kl_mean or None
-        """
-        with tf.GradientTape() as tape:
-            mean, std = self.actor(state), tf.exp(self.actor.logstd)
-            pi = tfp.distributions.Normal(mean, std)
-
-            ratio = tf.exp(pi.log_prob(action) - old_pi.log_prob(action))
-            surr = ratio * adv
-            if self.method == 'penalty':  # ppo penalty
-                kl = tfp.distributions.kl_divergence(old_pi, pi)
-                kl_mean = tf.reduce_mean(kl)
-                loss = -(tf.reduce_mean(surr - self.lam * kl))
-            else:  # ppo clip
-                loss = -tf.reduce_mean(
-                    tf.minimum(surr,
-                               tf.clip_by_value(ratio, 1. - self.epsilon, 1. + self.epsilon) * adv)
-                )
-        a_grad = tape.gradient(loss, self.actor.trainable_weights)
-        self.actor_opt.apply_gradients(zip(a_grad, self.actor.trainable_weights))
-
-        if self.method == 'penalty':
-            return kl_mean
-
-    def train_critic(self, reward, state):
-        """
-        Update the critic (value) network
-        :param reward: cumulative reward batch
-        :param state: state batch
-        :return: None
-        """
-        reward = np.array(reward, dtype=np.float32)
-        with tf.GradientTape() as tape:
-            advantage = reward - self.critic(state)
-            loss = tf.reduce_mean(tf.square(advantage))
-        grad = tape.gradient(loss, self.critic.trainable_weights)
-        self.critic_opt.apply_gradients(zip(grad, self.critic.trainable_weights))
-
-    def update(self):
-        """
-        Update parameters, with a KL-divergence constraint for the penalty method
-        :return: None
-        """
-        s = np.array(self.state_buffer, np.float32)
-        a = np.array(self.action_buffer, np.float32)
-        r = np.array(self.cumulative_reward_buffer, np.float32)
-        mean, std = self.actor(s), tf.exp(self.actor.logstd)
-        pi = tfp.distributions.Normal(mean, std)
-        adv = r - self.critic(s)
-
-        # update actor
-        if self.method == 'penalty':  # adapt the KL penalty coefficient
-            for _ in range(ACTOR_UPDATE_STEPS):
-                kl = self.train_actor(s, a, adv, pi)
-            if kl < self.kl_target / 1.5:
-                self.lam /= 2
-            elif kl > self.kl_target * 1.5:
-                self.lam *= 2
-        else:  # ppo clip
-            for _ in range(ACTOR_UPDATE_STEPS):
-                self.train_actor(s, a, adv, pi)
-
-        # update critic
-        for _ in range(CRITIC_UPDATE_STEPS):
-            self.train_critic(r, s)
-
-        self.state_buffer.clear()
-        self.action_buffer.clear()
-        self.cumulative_reward_buffer.clear()
-        self.reward_buffer.clear()
-
-    def get_action(self, state, greedy=False):
-        """
-        Choose an action
-        :param state: state
-        :param greedy: whether to choose the action greedily
-        :return: clipped action
-        """
-        state = state[np.newaxis, :].astype(np.float32)
-        mean, std = self.actor(state), tf.exp(self.actor.logstd)
-        if greedy:
-            action = mean[0]
-        else:
-            pi = tfp.distributions.Normal(mean, std)
-            action = tf.squeeze(pi.sample(1), axis=0)[0]  # sample an action
-        return np.clip(action, -self.action_bound,
self.action_bound) - - def save(self): - """ - save trained weights - :return: None - """ - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - if not os.path.exists(path): - os.makedirs(path) - tl.files.save_weights_to_hdf5(os.path.join(path, 'actor.hdf5'), self.actor) - tl.files.save_weights_to_hdf5(os.path.join(path, 'critic.hdf5'), self.critic) - - def load(self): - """ - load trained weights - :return: None - """ - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - tl.files.load_hdf5_to_weights_in_order(os.path.join(path, 'actor.hdf5'), self.actor) - tl.files.load_hdf5_to_weights_in_order(os.path.join(path, 'critic.hdf5'), self.critic) - - def store_transition(self, state, action, reward): - """ - Store state, action, reward at each step - :param state: - :param action: - :param reward: - :return: None - """ - self.state_buffer.append(state) - self.action_buffer.append(action) - self.reward_buffer.append(reward) - - def finish_path(self, next_state, done): - """ - Calculate cumulative reward - :param next_state: - :return: None - """ - if done: - v_s_ = 0 - else: - v_s_ = self.critic(np.array([next_state], np.float32))[0, 0] - discounted_r = [] - for r in self.reward_buffer[::-1]: - v_s_ = r + GAMMA * v_s_ - discounted_r.append(v_s_) - discounted_r.reverse() - discounted_r = np.array(discounted_r)[:, np.newaxis] - self.cumulative_reward_buffer.extend(discounted_r) - self.reward_buffer.clear() - - -if __name__ == '__main__': - env = gym.make(ENV_ID).unwrapped - - # reproducible - env.seed(RANDOM_SEED) - np.random.seed(RANDOM_SEED) - tf.random.set_seed(RANDOM_SEED) - - state_dim = env.observation_space.shape[0] - action_dim = env.action_space.shape[0] - action_bound = env.action_space.high - - agent = PPO(state_dim, action_dim, action_bound) - - t0 = time.time() - if args.train: - all_episode_reward = [] - for episode in range(TRAIN_EPISODES): - state = env.reset() - episode_reward = 0 - for step in range(MAX_STEPS): # in one episode - if RENDER: - env.render() - action = agent.get_action(state) - state_, reward, done, info = env.step(action) - agent.store_transition(state, action, reward) - state = state_ - episode_reward += reward - - # update ppo - if len(agent.state_buffer) >= BATCH_SIZE: - agent.finish_path(state_, done) - agent.update() - if done: - break - agent.finish_path(state_, done) - print( - 'Training | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - episode + 1, TRAIN_EPISODES, episode_reward, time.time() - t0) - ) - if episode == 0: - all_episode_reward.append(episode_reward) - else: - all_episode_reward.append(all_episode_reward[-1] * 0.9 + episode_reward * 0.1) - agent.save() - - plt.plot(all_episode_reward) - if not os.path.exists('image'): - os.makedirs('image') - plt.savefig(os.path.join('image', '_'.join([ALG_NAME, ENV_ID]))) - - if args.test: - # test - agent.load() - for episode in range(TEST_EPISODES): - state = env.reset() - episode_reward = 0 - for step in range(MAX_STEPS): - env.render() - state, reward, done, info = env.step(agent.get_action(state, greedy=True)) - episode_reward += reward - if done: - break - print( - 'Testing | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - episode + 1, TEST_EPISODES, episode_reward, - time.time() - t0)) diff --git a/examples/reinforcement_learning/tutorial_Qlearning.py b/examples/reinforcement_learning/tutorial_Qlearning.py deleted file mode 100644 index b2d553403..000000000 --- a/examples/reinforcement_learning/tutorial_Qlearning.py +++ /dev/null @@ 
-1,113 +0,0 @@ -"""Q-Table learning algorithm. -Non deep learning - TD Learning, Off-Policy, e-Greedy Exploration -Q(S, A) <- Q(S, A) + alpha * (R + lambda * Q(newS, newA) - Q(S, A)) -See David Silver RL Tutorial Lecture 5 - Q-Learning for more details. -For Q-Network, see tutorial_frozenlake_q_network.py -EN: https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0#.5m3361vlw -CN: https://zhuanlan.zhihu.com/p/25710327 -tensorflow==2.0.0a0 -tensorlayer==2.0.0 -""" - -import argparse -import os -import time - -import gym -import matplotlib.pyplot as plt -import numpy as np - -parser = argparse.ArgumentParser() -parser.add_argument('--train', dest='train', action='store_true', default=True) -parser.add_argument('--test', dest='test', action='store_true', default=True) - -parser.add_argument( - '--save_path', default=None, help='folder to save if mode == train else model path,' - 'qnet will be saved once target net update' -) -parser.add_argument('--seed', help='random seed', type=int, default=0) -parser.add_argument('--env_id', default='FrozenLake-v0') -args = parser.parse_args() - -## Load the environment -alg_name = 'Qlearning' -env_id = args.env_id -env = gym.make(env_id) -render = False # display the game environment - -##================= Implement Q-Table learning algorithm =====================## -## Initialize table with all zeros -Q = np.zeros([env.observation_space.n, env.action_space.n]) -## Set learning parameters -lr = .85 # alpha, if use value function approximation, we can ignore it -lambd = .99 # decay factor -num_episodes = 10000 -t0 = time.time() - -if args.train: - all_episode_reward = [] - for i in range(num_episodes): - ## Reset environment and get first new observation - s = env.reset() - rAll = 0 - ## The Q-Table learning algorithm - for j in range(99): - if render: env.render() - ## Choose an action by greedily (with noise) picking from Q table - a = np.argmax(Q[s, :] + np.random.randn(1, env.action_space.n) * (1. 
/ (i + 1)))
-            ## Get new state and reward from environment
-            s1, r, d, _ = env.step(a)
-            ## Update Q-Table with new knowledge
-            Q[s, a] = Q[s, a] + lr * (r + lambd * np.max(Q[s1, :]) - Q[s, a])
-            rAll += r
-            s = s1
-            if d:
-                break
-        print(
-            'Training | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
-                i + 1, num_episodes, rAll,
-                time.time() - t0
-            )
-        )
-        if i == 0:
-            all_episode_reward.append(rAll)
-        else:
-            all_episode_reward.append(all_episode_reward[-1] * 0.9 + rAll * 0.1)
-
-    # save
-    path = os.path.join('model', '_'.join([alg_name, env_id]))
-    if not os.path.exists(path):
-        os.makedirs(path)
-    np.save(os.path.join(path, 'Q_table.npy'), Q)
-
-    plt.plot(all_episode_reward)
-    if not os.path.exists('image'):
-        os.makedirs('image')
-    plt.savefig(os.path.join('image', '_'.join([alg_name, env_id])))
-
-    # print("Final Q-Table Values:\n %s" % Q)
-
-if args.test:
-    path = os.path.join('model', '_'.join([alg_name, env_id]))
-    Q = np.load(os.path.join(path, 'Q_table.npy'))
-    for i in range(num_episodes):
-        ## Reset environment and get first new observation
-        s = env.reset()
-        rAll = 0
-        ## The Q-Table learning algorithm
-        for j in range(99):
-            ## Choose an action by greedily picking from Q table (no exploration noise)
-            a = np.argmax(Q[s, :])
-            ## Get new state and reward from environment
-            s1, r, d, _ = env.step(a)
-            ## No Q-Table update during testing
-            rAll += r
-            s = s1
-            if d:
-                break
-        print(
-            'Testing | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
-                i + 1, num_episodes, rAll,
-                time.time() - t0
-            )
-        )
diff --git a/examples/reinforcement_learning/tutorial_SAC.py b/examples/reinforcement_learning/tutorial_SAC.py
deleted file mode 100644
index ef9b28d44..000000000
--- a/examples/reinforcement_learning/tutorial_SAC.py
+++ /dev/null
@@ -1,453 +0,0 @@
-"""
-Soft Actor-Critic (SAC)
-------------------
-Actor policy in SAC is stochastic, with off-policy training.
-The 'soft' in SAC indicates the trade-off between entropy and expected return.
-The additional entropy term encourages a more explorative policy.
-This implementation also contains an automatic update for the entropy factor.
-This version of the Soft Actor-Critic (SAC) implementation contains 5 networks:
-2 Q nets, 2 target Q nets, and 1 policy net.
-It uses an alpha loss for the entropy temperature.
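As a minimal sketch of the soft (entropy-regularized) TD target this file builds later,
assuming the two target Q values and the policy log-probability at the next state are
given as tensors (all names below are illustrative placeholders, not APIs from this repo):

import tensorflow as tf

def soft_bellman_target(reward, done, q1_next, q2_next, log_pi_next, alpha, gamma=0.99):
    # min over the two target Q estimates counters over-estimation (clipped double-Q)
    q_min = tf.minimum(q1_next, q2_next)
    # subtracting alpha * log_pi adds the entropy bonus to the target
    return reward + (1. - done) * gamma * (q_min - alpha * log_pi_next)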
-Reference ---------- -paper: https://arxiv.org/pdf/1812.05905.pdf -Environment ---- -Openai Gym Pendulum-v0, continuous action space -https://gym.openai.com/envs/Pendulum-v0/ -Prerequisites --------------- -tensorflow >=2.0.0a0 -tensorflow-probability 0.6.0 -tensorlayer >=2.0.0 -&& -pip install box2d box2d-kengz --user -To run ------- -python tutorial_SAC.py --train/test -""" - -import argparse -import os -import random -import time - -import gym -import matplotlib.pyplot as plt -import numpy as np -import tensorflow as tf - -import tensorflow_probability as tfp -import tensorlayer as tl -from tensorlayer.layers import Dense -from tensorlayer.models import Model - -Normal = tfp.distributions.Normal -tl.logging.set_verbosity(tl.logging.DEBUG) - -# add arguments in command --train/test -parser = argparse.ArgumentParser(description='Train or test neural net motor controller.') -parser.add_argument('--train', dest='train', action='store_true', default=False) -parser.add_argument('--test', dest='test', action='store_true', default=True) -args = parser.parse_args() - -##################### hyper parameters #################### - -ENV_ID = 'Pendulum-v0' # environment id -RANDOM_SEED = 2 # random seed -RENDER = False # render while training - -# RL training -ALG_NAME = 'SAC' -TRAIN_EPISODES = 100 # total number of episodes for training -TEST_EPISODES = 10 # total number of episodes for training -MAX_STEPS = 200 # total number of steps for each episode -EXPLORE_STEPS = 100 # 500 for random action sampling in the beginning of training - -BATCH_SIZE = 256 # update batch size -HIDDEN_DIM = 32 # size of hidden layers for networks -UPDATE_ITR = 3 # repeated updates for single step -SOFT_Q_LR = 3e-4 # q_net learning rate -POLICY_LR = 3e-4 # policy_net learning rate -ALPHA_LR = 3e-4 # alpha learning rate -POLICY_TARGET_UPDATE_INTERVAL = 3 # delayed update for the policy network and target networks -REWARD_SCALE = 1. 
# value range of reward -REPLAY_BUFFER_SIZE = 5e5 # size of the replay buffer - -AUTO_ENTROPY = True # automatically updating variable alpha for entropy - -############################### SAC #################################### - - -class ReplayBuffer: - """ - a ring buffer for storing transitions and sampling for training - :state: (state_dim,) - :action: (action_dim,) - :reward: (,), scalar - :next_state: (state_dim,) - :done: (,), scalar (0 and 1) or bool (True and False) - """ - - def __init__(self, capacity): - self.capacity = capacity - self.buffer = [] - self.position = 0 - - def push(self, state, action, reward, next_state, done): - if len(self.buffer) < self.capacity: - self.buffer.append(None) - self.buffer[self.position] = (state, action, reward, next_state, done) - self.position = int((self.position + 1) % self.capacity) # as a ring buffer - - def sample(self, BATCH_SIZE): - batch = random.sample(self.buffer, BATCH_SIZE) - state, action, reward, next_state, done = map(np.stack, zip(*batch)) # stack for each element - """ - the * serves as unpack: sum(a,b) <=> batch=(a,b), sum(*batch) ; - zip: a=[1,2], b=[2,3], zip(a,b) => [(1, 2), (2, 3)] ; - the map serves as mapping the function on each list element: map(square, [2,3]) => [4,9] ; - np.stack((1,2)) => array([1, 2]) - """ - return state, action, reward, next_state, done - - def __len__(self): - return len(self.buffer) - - -class SoftQNetwork(Model): - """ the network for evaluate values of state-action pairs: Q(s,a) """ - - def __init__(self, num_inputs, num_actions, hidden_dim, init_w=3e-3): - super(SoftQNetwork, self).__init__() - input_dim = num_inputs + num_actions - w_init = tf.keras.initializers.glorot_normal( - seed=None - ) # glorot initialization is better than uniform in practice - # w_init = tf.random_uniform_initializer(-init_w, init_w) - - self.linear1 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=input_dim, name='q1') - self.linear2 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=hidden_dim, name='q2') - self.linear3 = Dense(n_units=1, W_init=w_init, in_channels=hidden_dim, name='q3') - - def forward(self, input): - x = self.linear1(input) - x = self.linear2(x) - x = self.linear3(x) - return x - - -class PolicyNetwork(Model): - """ the network for generating non-determinstic (Gaussian distributed) action from the state input """ - - def __init__( - self, num_inputs, num_actions, hidden_dim, action_range=1., init_w=3e-3, log_std_min=-20, log_std_max=2 - ): - super(PolicyNetwork, self).__init__() - - self.log_std_min = log_std_min - self.log_std_max = log_std_max - - w_init = tf.keras.initializers.glorot_normal(seed=None) - # w_init = tf.random_uniform_initializer(-init_w, init_w) - - self.linear1 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=num_inputs, name='policy1') - self.linear2 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=hidden_dim, name='policy2') - self.linear3 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=hidden_dim, name='policy3') - - self.mean_linear = Dense( - n_units=num_actions, W_init=w_init, b_init=tf.random_uniform_initializer(-init_w, init_w), - in_channels=hidden_dim, name='policy_mean' - ) - self.log_std_linear = Dense( - n_units=num_actions, W_init=w_init, b_init=tf.random_uniform_initializer(-init_w, init_w), - in_channels=hidden_dim, name='policy_logstd' - ) - - self.action_range = action_range - self.num_actions = num_actions - - def forward(self, state): - x = 
self.linear1(state) - x = self.linear2(x) - x = self.linear3(x) - - mean = self.mean_linear(x) - log_std = self.log_std_linear(x) - log_std = tf.clip_by_value(log_std, self.log_std_min, self.log_std_max) - - return mean, log_std - - def evaluate(self, state, epsilon=1e-6): - """ generate action with state for calculating gradients """ - state = state.astype(np.float32) - mean, log_std = self.forward(state) - std = tf.math.exp(log_std) # no clip in evaluation, clip affects gradients flow - - normal = Normal(0, 1) - z = normal.sample(mean.shape) - action_0 = tf.math.tanh(mean + std * z) # TanhNormal distribution as actions; reparameterization trick - action = self.action_range * action_0 - # according to original paper, with an extra last term for normalizing different action range - log_prob = Normal(mean, std).log_prob(mean + std * z) - tf.math.log(1. - action_0**2 + - epsilon) - np.log(self.action_range) - # both dims of normal.log_prob and -log(1-a**2) are (N,dim_of_action); - # the Normal.log_prob outputs the same dim of input features instead of 1 dim probability, - # needs sum up across the dim of actions to get 1 dim probability; or else use Multivariate Normal. - log_prob = tf.reduce_sum(log_prob, axis=1)[:, np.newaxis] # expand dim as reduce_sum causes 1 dim reduced - - return action, log_prob, z, mean, log_std - - def get_action(self, state, greedy=False): - """ generate action with state for interaction with envronment """ - mean, log_std = self.forward([state]) - std = tf.math.exp(log_std) - - normal = Normal(0, 1) - z = normal.sample(mean.shape) - action = self.action_range * tf.math.tanh( - mean + std * z - ) # TanhNormal distribution as actions; reparameterization trick - - action = self.action_range * tf.math.tanh(mean) if greedy else action - return action.numpy()[0] - - def sample_action(self, ): - """ generate random actions for exploration """ - a = tf.random.uniform([self.num_actions], -1, 1) - return self.action_range * a.numpy() - - -class SAC: - - def __init__( - self, state_dim, action_dim, action_range, hidden_dim, replay_buffer, SOFT_Q_LR=3e-4, POLICY_LR=3e-4, - ALPHA_LR=3e-4 - ): - self.replay_buffer = replay_buffer - - # initialize all networks - self.soft_q_net1 = SoftQNetwork(state_dim, action_dim, hidden_dim) - self.soft_q_net2 = SoftQNetwork(state_dim, action_dim, hidden_dim) - self.target_soft_q_net1 = SoftQNetwork(state_dim, action_dim, hidden_dim) - self.target_soft_q_net2 = SoftQNetwork(state_dim, action_dim, hidden_dim) - self.policy_net = PolicyNetwork(state_dim, action_dim, hidden_dim, action_range) - self.soft_q_net1.train() - self.soft_q_net2.train() - self.target_soft_q_net1.eval() - self.target_soft_q_net2.eval() - self.policy_net.train() - - self.log_alpha = tf.Variable(0, dtype=np.float32, name='log_alpha') - self.alpha = tf.math.exp(self.log_alpha) - print('Soft Q Network (1,2): ', self.soft_q_net1) - print('Policy Network: ', self.policy_net) - # set mode - self.soft_q_net1.train() - self.soft_q_net2.train() - self.target_soft_q_net1.eval() - self.target_soft_q_net2.eval() - self.policy_net.train() - - # initialize weights of target networks - self.target_soft_q_net1 = self.target_ini(self.soft_q_net1, self.target_soft_q_net1) - self.target_soft_q_net2 = self.target_ini(self.soft_q_net2, self.target_soft_q_net2) - - self.soft_q_optimizer1 = tf.optimizers.Adam(SOFT_Q_LR) - self.soft_q_optimizer2 = tf.optimizers.Adam(SOFT_Q_LR) - self.policy_optimizer = tf.optimizers.Adam(POLICY_LR) - self.alpha_optimizer = tf.optimizers.Adam(ALPHA_LR) - - def 
target_ini(self, net, target_net): - """ hard-copy update for initializing target networks """ - for target_param, param in zip(target_net.trainable_weights, net.trainable_weights): - target_param.assign(param) - return target_net - - def target_soft_update(self, net, target_net, soft_tau): - """ soft update the target net with Polyak averaging """ - for target_param, param in zip(target_net.trainable_weights, net.trainable_weights): - target_param.assign( # copy weight value into target parameters - target_param * (1.0 - soft_tau) + param * soft_tau - ) - return target_net - - def update(self, batch_size, reward_scale=10., auto_entropy=True, target_entropy=-2, gamma=0.99, soft_tau=1e-2): - """ update all networks in SAC """ - state, action, reward, next_state, done = self.replay_buffer.sample(batch_size) - - reward = reward[:, np.newaxis] # expand dim - done = done[:, np.newaxis] - - reward = reward_scale * (reward - np.mean(reward, axis=0)) / ( - np.std(reward, axis=0) + 1e-6 - ) # normalize with batch mean and std; plus a small number to prevent numerical problem - - # Training Q Function - new_next_action, next_log_prob, _, _, _ = self.policy_net.evaluate(next_state) - target_q_input = tf.concat([next_state, new_next_action], 1) # the dim 0 is number of samples - target_q_min = tf.minimum( - self.target_soft_q_net1(target_q_input), self.target_soft_q_net2(target_q_input) - ) - self.alpha * next_log_prob - target_q_value = reward + (1 - done) * gamma * target_q_min # if done==1, only reward - q_input = tf.concat([state, action], 1) # the dim 0 is number of samples - - with tf.GradientTape() as q1_tape: - predicted_q_value1 = self.soft_q_net1(q_input) - q_value_loss1 = tf.reduce_mean(tf.losses.mean_squared_error(predicted_q_value1, target_q_value)) - q1_grad = q1_tape.gradient(q_value_loss1, self.soft_q_net1.trainable_weights) - self.soft_q_optimizer1.apply_gradients(zip(q1_grad, self.soft_q_net1.trainable_weights)) - - with tf.GradientTape() as q2_tape: - predicted_q_value2 = self.soft_q_net2(q_input) - q_value_loss2 = tf.reduce_mean(tf.losses.mean_squared_error(predicted_q_value2, target_q_value)) - q2_grad = q2_tape.gradient(q_value_loss2, self.soft_q_net2.trainable_weights) - self.soft_q_optimizer2.apply_gradients(zip(q2_grad, self.soft_q_net2.trainable_weights)) - - # Training Policy Function - with tf.GradientTape() as p_tape: - new_action, log_prob, z, mean, log_std = self.policy_net.evaluate(state) - new_q_input = tf.concat([state, new_action], 1) # the dim 0 is number of samples - """ implementation 1 """ - predicted_new_q_value = tf.minimum(self.soft_q_net1(new_q_input), self.soft_q_net2(new_q_input)) - # """ implementation 2 """ - # predicted_new_q_value = self.soft_q_net1(new_q_input) - policy_loss = tf.reduce_mean(self.alpha * log_prob - predicted_new_q_value) - p_grad = p_tape.gradient(policy_loss, self.policy_net.trainable_weights) - self.policy_optimizer.apply_gradients(zip(p_grad, self.policy_net.trainable_weights)) - - # Updating alpha w.r.t entropy - # alpha: trade-off between exploration (max entropy) and exploitation (max Q) - if auto_entropy is True: - with tf.GradientTape() as alpha_tape: - alpha_loss = -tf.reduce_mean((self.log_alpha * (log_prob + target_entropy))) - alpha_grad = alpha_tape.gradient(alpha_loss, [self.log_alpha]) - self.alpha_optimizer.apply_gradients(zip(alpha_grad, [self.log_alpha])) - self.alpha = tf.math.exp(self.log_alpha) - else: # fixed alpha - self.alpha = 1. 
- alpha_loss = 0 - - # Soft update the target value nets - self.target_soft_q_net1 = self.target_soft_update(self.soft_q_net1, self.target_soft_q_net1, soft_tau) - self.target_soft_q_net2 = self.target_soft_update(self.soft_q_net2, self.target_soft_q_net2, soft_tau) - - def save(self): # save trained weights - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - if not os.path.exists(path): - os.makedirs(path) - extend_path = lambda s: os.path.join(path, s) - tl.files.save_npz(self.soft_q_net1.trainable_weights, extend_path('model_q_net1.npz')) - tl.files.save_npz(self.soft_q_net2.trainable_weights, extend_path('model_q_net2.npz')) - tl.files.save_npz(self.target_soft_q_net1.trainable_weights, extend_path('model_target_q_net1.npz')) - tl.files.save_npz(self.target_soft_q_net2.trainable_weights, extend_path('model_target_q_net2.npz')) - tl.files.save_npz(self.policy_net.trainable_weights, extend_path('model_policy_net.npz')) - np.save(extend_path('log_alpha.npy'), self.log_alpha.numpy()) # save log_alpha variable - - def load_weights(self): # load trained weights - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - extend_path = lambda s: os.path.join(path, s) - tl.files.load_and_assign_npz(extend_path('model_q_net1.npz'), self.soft_q_net1) - tl.files.load_and_assign_npz(extend_path('model_q_net2.npz'), self.soft_q_net2) - tl.files.load_and_assign_npz(extend_path('model_target_q_net1.npz'), self.target_soft_q_net1) - tl.files.load_and_assign_npz(extend_path('model_target_q_net2.npz'), self.target_soft_q_net2) - tl.files.load_and_assign_npz(extend_path('model_policy_net.npz'), self.policy_net) - self.log_alpha.assign(np.load(extend_path('log_alpha.npy'))) # load log_alpha variable - - -if __name__ == '__main__': - # initialization of env - env = gym.make(ENV_ID).unwrapped - state_dim = env.observation_space.shape[0] - action_dim = env.action_space.shape[0] - action_range = env.action_space.high # scale action, [-action_range, action_range] - - # reproducible - env.seed(RANDOM_SEED) - random.seed(RANDOM_SEED) - np.random.seed(RANDOM_SEED) - tf.random.set_seed(RANDOM_SEED) - - # initialization of buffer - replay_buffer = ReplayBuffer(REPLAY_BUFFER_SIZE) - # initialization of trainer - agent = SAC(state_dim, action_dim, action_range, HIDDEN_DIM, replay_buffer, SOFT_Q_LR, POLICY_LR, ALPHA_LR) - - t0 = time.time() - # training loop - if args.train: - frame_idx = 0 - all_episode_reward = [] - - # need an extra call here to make inside functions be able to use model.forward - state = env.reset().astype(np.float32) - agent.policy_net([state]) - - for episode in range(TRAIN_EPISODES): - state = env.reset().astype(np.float32) - episode_reward = 0 - for step in range(MAX_STEPS): - if RENDER: - env.render() - if frame_idx > EXPLORE_STEPS: - action = agent.policy_net.get_action(state) - else: - action = agent.policy_net.sample_action() - - next_state, reward, done, _ = env.step(action) - next_state = next_state.astype(np.float32) - done = 1 if done is True else 0 - - replay_buffer.push(state, action, reward, next_state, done) - state = next_state - episode_reward += reward - frame_idx += 1 - - if len(replay_buffer) > BATCH_SIZE: - for i in range(UPDATE_ITR): - agent.update( - BATCH_SIZE, reward_scale=REWARD_SCALE, auto_entropy=AUTO_ENTROPY, - target_entropy=-1. 
* action_dim - ) - - if done: - break - if episode == 0: - all_episode_reward.append(episode_reward) - else: - all_episode_reward.append(all_episode_reward[-1] * 0.9 + episode_reward * 0.1) - print( - 'Training | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - episode + 1, TRAIN_EPISODES, episode_reward, - time.time() - t0 - ) - ) - agent.save() - plt.plot(all_episode_reward) - if not os.path.exists('image'): - os.makedirs('image') - plt.savefig(os.path.join('image', '_'.join([ALG_NAME, ENV_ID]))) - - if args.test: - agent.load_weights() - - # need an extra call here to make inside functions be able to use model.forward - state = env.reset().astype(np.float32) - agent.policy_net([state]) - - for episode in range(TEST_EPISODES): - state = env.reset().astype(np.float32) - episode_reward = 0 - for step in range(MAX_STEPS): - env.render() - state, reward, done, info = env.step(agent.policy_net.get_action(state, greedy=True)) - state = state.astype(np.float32) - episode_reward += reward - if done: - break - print( - 'Testing | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - episode + 1, TEST_EPISODES, episode_reward, - time.time() - t0 - ) - ) diff --git a/examples/reinforcement_learning/tutorial_TD3.py b/examples/reinforcement_learning/tutorial_TD3.py deleted file mode 100644 index 531eb20f7..000000000 --- a/examples/reinforcement_learning/tutorial_TD3.py +++ /dev/null @@ -1,436 +0,0 @@ -""" -Twin Delayed DDPG (TD3) ------------------------- -DDPG suffers from problems like overestimate of Q-values and sensitivity to hyper-parameters. -Twin Delayed DDPG (TD3) is a variant of DDPG with several tricks: -* Trick One: Clipped Double-Q Learning. TD3 learns two Q-functions instead of one (hence "twin"), -and uses the smaller of the two Q-values to form the targets in the Bellman error loss functions. - -* Trick Two: "Delayed" Policy Updates. TD3 updates the policy (and target networks) less frequently -than the Q-function. - -* Trick Three: Target Policy Smoothing. TD3 adds noise to the target action, to make it harder for -the policy to exploit Q-function errors by smoothing out Q along changes in action. - -The implementation of TD3 includes 6 networks: 2 Q-net, 2 target Q-net, 1 policy net, 1 target policy net -Actor policy in TD3 is deterministic, with Gaussian exploration noise. 
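The three tricks above condense into one target computation. A rough sketch, assuming
`target_policy`, `target_q1`, and `target_q2` are callables like the networks defined
later in this file (names are placeholders, not the file's actual objects):

import tensorflow as tf

def td3_target(reward, done, next_state, target_policy, target_q1, target_q2,
               gamma=0.9, noise_scale=0.5, noise_clip=1.0):
    # Trick three: target policy smoothing with clipped Gaussian noise
    action = target_policy(next_state)
    noise = tf.clip_by_value(
        tf.random.normal(tf.shape(action)) * noise_scale, -noise_clip, noise_clip)
    # Trick one: clipped double-Q, take the smaller of the two target values
    q_input = tf.concat([next_state, action + noise], 1)
    q_min = tf.minimum(target_q1(q_input), target_q2(q_input))
    return reward + (1. - done) * gamma * q_min

# Trick two (delayed policy updates) lives in the training loop: the actor and target
# nets are refreshed only every POLICY_TARGET_UPDATE_INTERVAL critic updates.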
- -Reference ---------- -original paper: https://arxiv.org/pdf/1802.09477.pdf - - -Environment ---- -Openai Gym Pendulum-v0, continuous action space -https://gym.openai.com/envs/Pendulum-v0/ - -Prerequisites ---- -tensorflow >=2.0.0a0 -tensorflow-probability 0.6.0 -tensorlayer >=2.0.0 - -&& -pip install box2d box2d-kengz --user - -To run -------- -python tutorial_TD3.py --train/test - -""" - -import argparse -import os -import random -import time - -import gym -import matplotlib.pyplot as plt -import numpy as np -import tensorflow as tf - -import tensorflow_probability as tfp -import tensorlayer as tl -from tensorlayer.layers import Dense -from tensorlayer.models import Model - -Normal = tfp.distributions.Normal -tl.logging.set_verbosity(tl.logging.DEBUG) - -# add arguments in command --train/test -parser = argparse.ArgumentParser(description='Train or test neural net motor controller.') -parser.add_argument('--train', dest='train', action='store_true', default=False) -parser.add_argument('--test', dest='test', action='store_true', default=True) -args = parser.parse_args() - -##################### hyper parameters #################### -# choose env -ENV_ID = 'Pendulum-v0' # environment id -RANDOM_SEED = 2 # random seed -RENDER = False # render while training - -# RL training -ALG_NAME = 'TD3' -TRAIN_EPISODES = 100 # total number of episodes for training -TEST_EPISODES = 10 # total number of episodes for training -MAX_STEPS = 200 # maximum number of steps for one episode -BATCH_SIZE = 64 # update batch size -EXPLORE_STEPS = 500 # 500 for random action sampling in the beginning of training - -HIDDEN_DIM = 64 # size of hidden layers for networks -UPDATE_ITR = 3 # repeated updates for single step -Q_LR = 3e-4 # q_net learning rate -POLICY_LR = 3e-4 # policy_net learning rate -POLICY_TARGET_UPDATE_INTERVAL = 3 # delayed steps for updating the policy network and target networks -EXPLORE_NOISE_SCALE = 1.0 # range of action noise for exploration -EVAL_NOISE_SCALE = 0.5 # range of action noise for evaluation of action value -REWARD_SCALE = 1. 
# value range of reward -REPLAY_BUFFER_SIZE = 5e5 # size of replay buffer - -############################### TD3 #################################### - - -class ReplayBuffer: - """ - a ring buffer for storing transitions and sampling for training - :state: (state_dim,) - :action: (action_dim,) - :reward: (,), scalar - :next_state: (state_dim,) - :done: (,), scalar (0 and 1) or bool (True and False) - """ - - def __init__(self, capacity): - self.capacity = capacity - self.buffer = [] - self.position = 0 - - def push(self, state, action, reward, next_state, done): - if len(self.buffer) < self.capacity: - self.buffer.append(None) - self.buffer[self.position] = (state, action, reward, next_state, done) - self.position = int((self.position + 1) % self.capacity) # as a ring buffer - - def sample(self, batch_size): - batch = random.sample(self.buffer, batch_size) - state, action, reward, next_state, done = map(np.stack, zip(*batch)) # stack for each element - """ - the * serves as unpack: sum(a,b) <=> batch=(a,b), sum(*batch) ; - zip: a=[1,2], b=[2,3], zip(a,b) => [(1, 2), (2, 3)] ; - the map serves as mapping the function on each list element: map(square, [2,3]) => [4,9] ; - np.stack((1,2)) => array([1, 2]) - """ - return state, action, reward, next_state, done - - def __len__(self): - return len(self.buffer) - - -class QNetwork(Model): - """ the network for evaluate values of state-action pairs: Q(s,a) """ - - def __init__(self, num_inputs, num_actions, hidden_dim, init_w=3e-3): - super(QNetwork, self).__init__() - input_dim = num_inputs + num_actions - # w_init = tf.keras.initializers.glorot_normal(seed=None) - w_init = tf.random_uniform_initializer(-init_w, init_w) - - self.linear1 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=input_dim, name='q1') - self.linear2 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=hidden_dim, name='q2') - self.linear3 = Dense(n_units=1, W_init=w_init, in_channels=hidden_dim, name='q3') - - def forward(self, input): - x = self.linear1(input) - x = self.linear2(x) - x = self.linear3(x) - return x - - -class PolicyNetwork(Model): - """ the network for generating non-determinstic (Gaussian distributed) action from the state input """ - - def __init__(self, num_inputs, num_actions, hidden_dim, action_range=1., init_w=3e-3): - super(PolicyNetwork, self).__init__() - w_init = tf.random_uniform_initializer(-init_w, init_w) - - self.linear1 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=num_inputs, name='policy1') - self.linear2 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=hidden_dim, name='policy2') - self.linear3 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=hidden_dim, name='policy3') - self.output_linear = Dense( - n_units=num_actions, W_init=w_init, b_init=tf.random_uniform_initializer(-init_w, init_w), - in_channels=hidden_dim, name='policy_output' - ) - self.action_range = action_range - self.num_actions = num_actions - - def forward(self, state): - x = self.linear1(state) - x = self.linear2(x) - x = self.linear3(x) - output = tf.nn.tanh(self.output_linear(x)) # unit range output [-1, 1] - return output - - def evaluate(self, state, eval_noise_scale): - """ - generate action with state for calculating gradients; - eval_noise_scale: as the trick of target policy smoothing, for generating noisy actions. 
- """ - state = state.astype(np.float32) - action = self.forward(state) - - action = self.action_range * action - - # add noise - normal = Normal(0, 1) - eval_noise_clip = 2 * eval_noise_scale - noise = normal.sample(action.shape) * eval_noise_scale - noise = tf.clip_by_value(noise, -eval_noise_clip, eval_noise_clip) - action = action + noise - return action - - def get_action(self, state, explore_noise_scale, greedy=False): - """ generate action with state for interaction with envronment """ - action = self.forward([state]) - action = self.action_range * action.numpy()[0] - if greedy: - return action - # add noise - normal = Normal(0, 1) - noise = normal.sample(action.shape) * explore_noise_scale - action += noise - return action.numpy() - - def sample_action(self): - """ generate random actions for exploration """ - a = tf.random.uniform([self.num_actions], -1, 1) - return self.action_range * a.numpy() - - -class TD3: - - def __init__( - self, state_dim, action_dim, action_range, hidden_dim, replay_buffer, policy_target_update_interval=1, - q_lr=3e-4, policy_lr=3e-4 - ): - self.replay_buffer = replay_buffer - - # initialize all networks - self.q_net1 = QNetwork(state_dim, action_dim, hidden_dim) - self.q_net2 = QNetwork(state_dim, action_dim, hidden_dim) - self.target_q_net1 = QNetwork(state_dim, action_dim, hidden_dim) - self.target_q_net2 = QNetwork(state_dim, action_dim, hidden_dim) - self.policy_net = PolicyNetwork(state_dim, action_dim, hidden_dim, action_range) - self.target_policy_net = PolicyNetwork(state_dim, action_dim, hidden_dim, action_range) - print('Q Network (1,2): ', self.q_net1) - print('Policy Network: ', self.policy_net) - - # initialize weights of target networks - self.target_q_net1 = self.target_ini(self.q_net1, self.target_q_net1) - self.target_q_net2 = self.target_ini(self.q_net2, self.target_q_net2) - self.target_policy_net = self.target_ini(self.policy_net, self.target_policy_net) - - # set train mode - self.q_net1.train() - self.q_net2.train() - self.target_q_net1.eval() - self.target_q_net2.eval() - self.policy_net.train() - self.target_policy_net.eval() - - self.update_cnt = 0 - self.policy_target_update_interval = policy_target_update_interval - - self.q_optimizer1 = tf.optimizers.Adam(q_lr) - self.q_optimizer2 = tf.optimizers.Adam(q_lr) - self.policy_optimizer = tf.optimizers.Adam(policy_lr) - - def target_ini(self, net, target_net): - """ hard-copy update for initializing target networks """ - for target_param, param in zip(target_net.trainable_weights, net.trainable_weights): - target_param.assign(param) - return target_net - - def target_soft_update(self, net, target_net, soft_tau): - """ soft update the target net with Polyak averaging """ - for target_param, param in zip(target_net.trainable_weights, net.trainable_weights): - target_param.assign( # copy weight value into target parameters - target_param * (1.0 - soft_tau) + param * soft_tau - ) - return target_net - - def update(self, batch_size, eval_noise_scale, reward_scale=10., gamma=0.9, soft_tau=1e-2): - """ update all networks in TD3 """ - self.update_cnt += 1 - state, action, reward, next_state, done = self.replay_buffer.sample(batch_size) - - reward = reward[:, np.newaxis] # expand dim - done = done[:, np.newaxis] - - new_next_action = self.target_policy_net.evaluate( - next_state, eval_noise_scale=eval_noise_scale - ) # clipped normal noise - reward = reward_scale * (reward - np.mean(reward, axis=0)) / ( - np.std(reward, axis=0) + 1e-6 - ) # normalize with batch mean and std; plus a small 
number to prevent numerical problem - - # Training Q Function - target_q_input = tf.concat([next_state, new_next_action], 1) # the dim 0 is number of samples - target_q_min = tf.minimum(self.target_q_net1(target_q_input), self.target_q_net2(target_q_input)) - - target_q_value = reward + (1 - done) * gamma * target_q_min # if done==1, only reward - q_input = tf.concat([state, action], 1) # input of q_net - - with tf.GradientTape() as q1_tape: - predicted_q_value1 = self.q_net1(q_input) - q_value_loss1 = tf.reduce_mean(tf.square(predicted_q_value1 - target_q_value)) - q1_grad = q1_tape.gradient(q_value_loss1, self.q_net1.trainable_weights) - self.q_optimizer1.apply_gradients(zip(q1_grad, self.q_net1.trainable_weights)) - - with tf.GradientTape() as q2_tape: - predicted_q_value2 = self.q_net2(q_input) - q_value_loss2 = tf.reduce_mean(tf.square(predicted_q_value2 - target_q_value)) - q2_grad = q2_tape.gradient(q_value_loss2, self.q_net2.trainable_weights) - self.q_optimizer2.apply_gradients(zip(q2_grad, self.q_net2.trainable_weights)) - - # Training Policy Function - if self.update_cnt % self.policy_target_update_interval == 0: - with tf.GradientTape() as p_tape: - new_action = self.policy_net.evaluate( - state, eval_noise_scale=0.0 - ) # no noise, deterministic policy gradients - new_q_input = tf.concat([state, new_action], 1) - # """ implementation 1 """ - # predicted_new_q_value = tf.minimum(self.q_net1(new_q_input),self.q_net2(new_q_input)) - """ implementation 2 """ - predicted_new_q_value = self.q_net1(new_q_input) - policy_loss = -tf.reduce_mean(predicted_new_q_value) - p_grad = p_tape.gradient(policy_loss, self.policy_net.trainable_weights) - self.policy_optimizer.apply_gradients(zip(p_grad, self.policy_net.trainable_weights)) - - # Soft update the target nets - self.target_q_net1 = self.target_soft_update(self.q_net1, self.target_q_net1, soft_tau) - self.target_q_net2 = self.target_soft_update(self.q_net2, self.target_q_net2, soft_tau) - self.target_policy_net = self.target_soft_update(self.policy_net, self.target_policy_net, soft_tau) - - def save(self): # save trained weights - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - if not os.path.exists(path): - os.makedirs(path) - extend_path = lambda s: os.path.join(path, s) - tl.files.save_npz(self.q_net1.trainable_weights, extend_path('model_q_net1.npz')) - tl.files.save_npz(self.q_net2.trainable_weights, extend_path('model_q_net2.npz')) - tl.files.save_npz(self.target_q_net1.trainable_weights, extend_path('model_target_q_net1.npz')) - tl.files.save_npz(self.target_q_net2.trainable_weights, extend_path('model_target_q_net2.npz')) - tl.files.save_npz(self.policy_net.trainable_weights, extend_path('model_policy_net.npz')) - tl.files.save_npz(self.target_policy_net.trainable_weights, extend_path('model_target_policy_net.npz')) - - def load(self): # load trained weights - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - extend_path = lambda s: os.path.join(path, s) - tl.files.load_and_assign_npz(extend_path('model_q_net1.npz'), self.q_net1) - tl.files.load_and_assign_npz(extend_path('model_q_net2.npz'), self.q_net2) - tl.files.load_and_assign_npz(extend_path('model_target_q_net1.npz'), self.target_q_net1) - tl.files.load_and_assign_npz(extend_path('model_target_q_net2.npz'), self.target_q_net2) - tl.files.load_and_assign_npz(extend_path('model_policy_net.npz'), self.policy_net) - tl.files.load_and_assign_npz(extend_path('model_target_policy_net.npz'), self.target_policy_net) - - -if __name__ == '__main__': - # 
initialization of env - env = gym.make(ENV_ID).unwrapped - state_dim = env.observation_space.shape[0] - action_dim = env.action_space.shape[0] - action_range = env.action_space.high # scale action, [-action_range, action_range] - - # reproducible - env.seed(RANDOM_SEED) - random.seed(RANDOM_SEED) - np.random.seed(RANDOM_SEED) - tf.random.set_seed(RANDOM_SEED) - - # initialization of buffer - replay_buffer = ReplayBuffer(REPLAY_BUFFER_SIZE) - # initialization of trainer - agent = TD3( - state_dim, action_dim, action_range, HIDDEN_DIM, replay_buffer, POLICY_TARGET_UPDATE_INTERVAL, Q_LR, POLICY_LR - ) - t0 = time.time() - - # training loop - if args.train: - frame_idx = 0 - all_episode_reward = [] - - # need an extra call here to make inside functions be able to use model.forward - state = env.reset().astype(np.float32) - agent.policy_net([state]) - agent.target_policy_net([state]) - - for episode in range(TRAIN_EPISODES): - state = env.reset().astype(np.float32) - episode_reward = 0 - - for step in range(MAX_STEPS): - if RENDER: - env.render() - if frame_idx > EXPLORE_STEPS: - action = agent.policy_net.get_action(state, EXPLORE_NOISE_SCALE) - else: - action = agent.policy_net.sample_action() - - next_state, reward, done, _ = env.step(action) - next_state = next_state.astype(np.float32) - done = 1 if done is True else 0 - - replay_buffer.push(state, action, reward, next_state, done) - state = next_state - episode_reward += reward - frame_idx += 1 - - if len(replay_buffer) > BATCH_SIZE: - for i in range(UPDATE_ITR): - agent.update(BATCH_SIZE, EVAL_NOISE_SCALE, REWARD_SCALE) - - if done: - break - if episode == 0: - all_episode_reward.append(episode_reward) - else: - all_episode_reward.append(all_episode_reward[-1] * 0.9 + episode_reward * 0.1) - print( - 'Training | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - episode + 1, TRAIN_EPISODES, episode_reward, - time.time() - t0 - ) - ) - agent.save() - plt.plot(all_episode_reward) - if not os.path.exists('image'): - os.makedirs('image') - plt.savefig(os.path.join('image', '_'.join([ALG_NAME, ENV_ID]))) - - if args.test: - agent.load() - - # need an extra call here to make inside functions be able to use model.forward - state = env.reset().astype(np.float32) - agent.policy_net([state]) - - for episode in range(TEST_EPISODES): - state = env.reset().astype(np.float32) - episode_reward = 0 - for step in range(MAX_STEPS): - env.render() - action = agent.policy_net.get_action(state, EXPLORE_NOISE_SCALE, greedy=True) - state, reward, done, info = env.step(action) - state = state.astype(np.float32) - episode_reward += reward - if done: - break - print( - 'Testing | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - episode + 1, TEST_EPISODES, episode_reward, - time.time() - t0 - ) - ) diff --git a/examples/reinforcement_learning/tutorial_TRPO.py b/examples/reinforcement_learning/tutorial_TRPO.py deleted file mode 100644 index ae47a20bd..000000000 --- a/examples/reinforcement_learning/tutorial_TRPO.py +++ /dev/null @@ -1,512 +0,0 @@ -""" -Trust Region Policy Optimization (TRPO) ---------------------------------------- -PG method with a large step can collapse the policy performance, -even with a small step can lead a large differences in policy. -TRPO constraint the step in policy space using KL divergence (rather than in parameter space), -which can monotonically improve performance and avoid a collapsed update. - -Reference ---------- -Trust Region Policy Optimization, Schulman et al. 
2015 -High Dimensional Continuous Control Using Generalized Advantage Estimation, Schulman et al. 2016 -Approximately Optimal Approximate Reinforcement Learning, Kakade and Langford 2002 -openai/spinningup : http://spinningup.openai.com/en/latest/algorithms/trpo.html - -Environment ------------ -Openai Gym Pendulum-v0, continual action space - -Prerequisites --------------- -tensorflow >=2.0.0a0 -tensorflow-probability 0.6.0 -tensorlayer >=2.0.0 - -To run ------- -python tutorial_TRPO.py --train/test - -""" -import argparse -import copy -import os -import threading -import time - -import gym -import matplotlib.pyplot as plt -import numpy as np -import scipy.signal -import tensorflow as tf - -import tensorflow_probability as tfp -import tensorlayer as tl - -parser = argparse.ArgumentParser(description='Train or test neural net motor controller.') -parser.add_argument('--train', dest='train', action='store_true', default=False) -parser.add_argument('--test', dest='test', action='store_true', default=True) -args = parser.parse_args() - -##################### hyper parameters #################### - -ENV_ID = 'Pendulum-v0' # environment id -RANDOM_SEED = 2 # random seed -RENDER = False - -ALG_NAME = 'TRPO' -TRAIN_EPISODES = 1000 # total number of episodes for training -TEST_EPISODES = 100 # total number of episodes for testing -MAX_STEPS = 200 # total number of steps for each episode - -HIDDEN_SIZES = [64, 64] # hidden layer size -GAMMA = 0.99 # reward discount -DELTA = 0.01 # KL-divergence limit for TRPO update. -VF_LR = 1e-3 # Learning rate for value function optimizer -TRAIN_VF_ITERS = 100 # Number of gradient descent steps to take on value function per epoch -DAMPING_COEFF = 0.1 # Artifact for numerical stability -CG_ITERS = 10 # Number of iterations of conjugate gradient to perform -BACKTRACK_ITERS = 10 # Maximum number of steps allowed in the backtracking line search -BACKTRACK_COEFF = 0.8 # How far back to step during backtracking line search -LAM = 0.97 # lambda for GAE-lambda -SAVE_FREQ = 10 # How often (in terms of gap between epochs) to save the current policy and value function -EPS = 1e-8 # epsilon -BATCH_SIZE = 512 # batch size - -##################### functions #################### - - -class GAE_Buffer: - """ - A buffer for storing trajectories experienced by a TRPO agent interacting - with the environment, and using Generalized Advantage Estimation (GAE-lambda) - for calculating the advantages of state-action pairs. - """ - - def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95): - self.obs_buf = np.zeros((size, obs_dim), dtype=np.float32) - self.act_buf = np.zeros((size, act_dim), dtype=np.float32) - self.adv_buf = np.zeros(size, dtype=np.float32) - self.rew_buf = np.zeros(size, dtype=np.float32) - self.ret_buf = np.zeros(size, dtype=np.float32) - self.val_buf = np.zeros(size, dtype=np.float32) - self.logp_buf = np.zeros(size, dtype=np.float32) - self.mean_buf = np.zeros(size, dtype=np.float32) - self.log_std_buf = np.zeros(size, dtype=np.float32) - self.gamma, self.lam = gamma, lam - self.ptr, self.path_start_idx, self.max_size = 0, 0, size - - def store(self, obs, act, rew, val, logp, mean, log_std): - """ - Append one timestep of agent-environment interaction to the buffer. 
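In equations, the buffer computes the GAE-lambda estimate: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t),
and A_t = sum over l of (gamma * lam)^l * delta_{t+l}. A standalone numpy sketch of the same
computation that finish_path performs below (names illustrative):

import numpy as np

def gae_advantages(rewards, values, last_val, gamma=0.99, lam=0.97):
    rews = np.append(rewards, last_val)
    vals = np.append(values, last_val)
    deltas = rews[:-1] + gamma * vals[1:] - vals[:-1]  # one-step TD residuals
    adv = np.zeros_like(deltas)
    running = 0.0
    for t in reversed(range(len(deltas))):  # discounted cumulative sum of residuals
        running = deltas[t] + gamma * lam * running
        adv[t] = running
    return adv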
- """ - assert self.ptr < self.max_size # buffer has to have room so you can store - self.obs_buf[self.ptr] = obs - self.act_buf[self.ptr] = act - self.rew_buf[self.ptr] = rew - self.val_buf[self.ptr] = val - self.logp_buf[self.ptr] = logp - self.mean_buf[self.ptr] = mean - self.log_std_buf[self.ptr] = log_std - self.ptr += 1 - - def finish_path(self, last_val=0): - """ - Call this at the end of a trajectory, or when one gets cut off - by an epoch ending. This looks back in the buffer to where the - trajectory started, and uses rewards and value estimates from - the whole trajectory to compute advantage estimates with GAE-lambda, - as well as compute the rewards-to-go for each state, to use as - the targets for the value function. - - The "last_val" argument should be 0 if the trajectory ended - because the agent reached a terminal state (died), and otherwise - should be V(s_T), the value function estimated for the last state. - This allows us to bootstrap the reward-to-go calculation to account - for timesteps beyond the arbitrary episode horizon (or epoch cutoff). - """ - path_slice = slice(self.path_start_idx, self.ptr) - rews = np.append(self.rew_buf[path_slice], last_val) - vals = np.append(self.val_buf[path_slice], last_val) - # the next two lines implement GAE-lambda advantage calculation - deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1] - self.adv_buf[path_slice] = self._discount_cumsum(deltas, self.gamma * self.lam) - - # the next line computes rewards-to-go, to be targets for the value function - self.ret_buf[path_slice] = self._discount_cumsum(rews, self.gamma)[:-1] - - self.path_start_idx = self.ptr - - def _discount_cumsum(self, x, discount): - """ - magic from rllab for computing discounted cumulative sums of vectors. - - input: - vector x, - [x0, - x1, - x2] - - output: - [x0 + discount * x1 + discount^2 * x2, - x1 + discount * x2, - x2] - """ - return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1] - - def is_full(self): - return self.ptr == self.max_size - - def get(self): - """ - Call this at the end of an epoch to get all of the data from - the buffer, with advantages appropriately normalized (shifted to have - mean zero and std one). Also, resets some pointers in the buffer. 
- """ - assert self.ptr == self.max_size # buffer has to be full before you can get - self.ptr, self.path_start_idx = 0, 0 - - # the next two lines implement the advantage normalization trick - adv_mean, adv_std = np.mean(self.adv_buf), np.std(self.adv_buf) - self.adv_buf = (self.adv_buf - adv_mean) / adv_std - return [self.obs_buf, self.act_buf, self.adv_buf, self.ret_buf, self.logp_buf, self.mean_buf, self.log_std_buf] - - -""" -Trust Region Policy Optimization -""" - - -class TRPO: - """ - trpo class - """ - - def __init__(self, state_dim, action_dim, action_bound): - # critic - with tf.name_scope('critic'): - layer = input_layer = tl.layers.Input([None, state_dim], tf.float32) - for d in HIDDEN_SIZES: - layer = tl.layers.Dense(d, tf.nn.relu)(layer) - v = tl.layers.Dense(1)(layer) - self.critic = tl.models.Model(input_layer, v) - self.critic.train() - - # actor - with tf.name_scope('actor'): - layer = input_layer = tl.layers.Input([None, state_dim], tf.float32) - for d in HIDDEN_SIZES: - layer = tl.layers.Dense(d, tf.nn.relu)(layer) - mean = tl.layers.Dense(action_dim, tf.nn.tanh)(layer) - mean = tl.layers.Lambda(lambda x: x * action_bound)(mean) - log_std = tf.Variable(np.zeros(action_dim, dtype=np.float32)) - - self.actor = tl.models.Model(input_layer, mean) - self.actor.trainable_weights.append(log_std) - self.actor.log_std = log_std - self.actor.train() - - self.buf = GAE_Buffer(state_dim, action_dim, BATCH_SIZE, GAMMA, LAM) - self.critic_optimizer = tf.optimizers.Adam(learning_rate=VF_LR) - self.action_bound = action_bound - - def get_action(self, state, greedy=False): - """ - get action - :param state: state input - :param greedy: get action greedy or not - :return: pi, v, logp_pi, mean, log_std - """ - state = np.array([state], np.float32) - mean = self.actor(state) - log_std = tf.convert_to_tensor(self.actor.log_std) - std = tf.exp(log_std) - std = tf.ones_like(mean) * std - pi = tfp.distributions.Normal(mean, std) - - if greedy: - action = mean - else: - action = pi.sample() - action = np.clip(action, -self.action_bound, self.action_bound) - logp_pi = pi.log_prob(action) - - value = self.critic(state) - return action[0], value, logp_pi, mean, log_std - - def pi_loss(self, states, actions, adv, old_log_prob): - """ - calculate pi loss - :param states: state batch - :param actions: action batch - :param adv: advantage batch - :param old_log_prob: old log probability - :return: pi loss - """ - mean = self.actor(states) - pi = tfp.distributions.Normal(mean, tf.exp(self.actor.log_std)) - log_prob = pi.log_prob(actions)[:, 0] - ratio = tf.exp(log_prob - old_log_prob) - surr = tf.reduce_mean(ratio * adv) - return -surr - - def gradient(self, states, actions, adv, old_log_prob): - """ - pi gradients - :param states: state batch - :param actions: actions batch - :param adv: advantage batch - :param old_log_prob: old log probability batch - :return: gradient - """ - pi_params = self.actor.trainable_weights - with tf.GradientTape() as tape: - loss = self.pi_loss(states, actions, adv, old_log_prob) - grad = tape.gradient(loss, pi_params) - gradient = self._flat_concat(grad) - return gradient, loss - - def train_vf(self, states, rewards_to_go): - """ - train v function - :param states: state batch - :param rewards_to_go: rewards-to-go batch - :return: None - """ - with tf.GradientTape() as tape: - value = self.critic(states) - loss = tf.reduce_mean((rewards_to_go - value[:, 0])**2) - grad = tape.gradient(loss, self.critic.trainable_weights) - self.critic_optimizer.apply_gradients(zip(grad, 
self.critic.trainable_weights)) - - def kl(self, states, old_mean, old_log_std): - """ - calculate kl-divergence - :param states: state batch - :param old_mean: mean batch of the old pi - :param old_log_std: log std batch of the old pi - :return: kl_mean or None - """ - old_mean = old_mean[:, np.newaxis] - old_log_std = old_log_std[:, np.newaxis] - old_std = tf.exp(old_log_std) - old_pi = tfp.distributions.Normal(old_mean, old_std) - - mean = self.actor(states) - std = tf.exp(self.actor.log_std) * tf.ones_like(mean) - pi = tfp.distributions.Normal(mean, std) - - kl = tfp.distributions.kl_divergence(pi, old_pi) - all_kls = tf.reduce_sum(kl, axis=1) - return tf.reduce_mean(all_kls) - - def _flat_concat(self, xs): - """ - flat concat input - :param xs: a list of tensor - :return: flat tensor - """ - return tf.concat([tf.reshape(x, (-1, )) for x in xs], axis=0) - - def get_pi_params(self): - """ - get actor trainable parameters - :return: flat actor trainable parameters - """ - pi_params = self.actor.trainable_weights - return self._flat_concat(pi_params) - - def set_pi_params(self, flat_params): - """ - set actor trainable parameters - :param flat_params: inputs - :return: None - """ - pi_params = self.actor.trainable_weights - flat_size = lambda p: int(np.prod(p.shape.as_list())) # the 'int' is important for scalars - splits = tf.split(flat_params, [flat_size(p) for p in pi_params]) - new_params = [tf.reshape(p_new, p.shape) for p, p_new in zip(pi_params, splits)] - return tf.group([p.assign(p_new) for p, p_new in zip(pi_params, new_params)]) - - def save(self): - """ - save trained weights - :return: None - """ - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - if not os.path.exists(path): - os.makedirs(path) - tl.files.save_weights_to_hdf5(os.path.join(path, 'actor.hdf5'), self.actor) - tl.files.save_weights_to_hdf5(os.path.join(path, 'critic.hdf5'), self.critic) - - def load(self): - """ - load trained weights - :return: None - """ - path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID])) - tl.files.load_hdf5_to_weights_in_order(os.path.join(path, 'actor.hdf5'), self.actor) - tl.files.load_hdf5_to_weights_in_order(os.path.join(path, 'critic.hdf5'), self.critic) - - def cg(self, Ax, b): - """ - Conjugate gradient algorithm - (see https://en.wikipedia.org/wiki/Conjugate_gradient_method) - """ - x = np.zeros_like(b) - r = copy.deepcopy(b) # Note: should be 'b - Ax(x)', but for x=0, Ax(x)=0. Change if doing warm start. 
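# Note on the conjugate gradient step: cg() solves H x = g for the natural-gradient
# search direction without ever forming the KL Hessian H explicitly; the Ax argument
# is the Hessian-vector product closure hvp() defined below, so each CG iteration
# costs only one extra backward pass.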
- p = copy.deepcopy(r) - r_dot_old = np.dot(r, r) - for _ in range(CG_ITERS): - z = Ax(p) - alpha = r_dot_old / (np.dot(p, z) + EPS) - x += alpha * p - r -= alpha * z - r_dot_new = np.dot(r, r) - p = r + (r_dot_new / r_dot_old) * p - r_dot_old = r_dot_new - return x - - def hvp(self, states, old_mean, old_log_std, x): - """ - calculate Hessian-vector product - :param states: state batch - :param old_mean: mean batch of the old pi - :param old_log_std: log std batch of the old pi - :return: hvp - """ - pi_params = self.actor.trainable_weights - with tf.GradientTape() as tape1: - with tf.GradientTape() as tape0: - d_kl = self.kl(states, old_mean, old_log_std) - g = self._flat_concat(tape0.gradient(d_kl, pi_params)) - l = tf.reduce_sum(g * x) - hvp = self._flat_concat(tape1.gradient(l, pi_params)) - - if DAMPING_COEFF > 0: - hvp += DAMPING_COEFF * x - return hvp - - def update(self): - """ - update trpo - :return: None - """ - states, actions, adv, rewards_to_go, logp_old_ph, old_mu, old_log_std = self.buf.get() - g, pi_l_old = self.gradient(states, actions, adv, logp_old_ph) - - Hx = lambda x: self.hvp(states, old_mu, old_log_std, x) - x = self.cg(Hx, g) - - alpha = np.sqrt(2 * DELTA / (np.dot(x, Hx(x)) + EPS)) - old_params = self.get_pi_params() - - def set_and_eval(step): - params = old_params - alpha * x * step - self.set_pi_params(params) - d_kl = self.kl(states, old_mu, old_log_std) - loss = self.pi_loss(states, actions, adv, logp_old_ph) - return [d_kl, loss] - - # trpo with backtracking line search, hard kl - for j in range(BACKTRACK_ITERS): - kl, pi_l_new = set_and_eval(step=BACKTRACK_COEFF**j) - if kl <= DELTA and pi_l_new <= pi_l_old: - # Accepting new params at step of line search - break - else: - # Line search failed! Keeping old params. - set_and_eval(step=0.) 
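As an aside, the `cg` routine above is the core trick of TRPO: it solves Hx = g for the natural-gradient direction using only Hessian-vector products, never forming the Fisher matrix H explicitly. A minimal, self-contained NumPy sketch of the same loop and of the KL-based step size used by the line search (an explicit SPD matrix stands in for the Fisher matrix; the sizes and `DELTA` are illustrative assumptions, not the tutorial's values):

```python
# Standalone sketch of TRPO's conjugate-gradient step (NumPy only).
# H is an explicit SPD matrix here purely for illustration; in TRPO it is
# only ever accessed through Hessian-vector products such as `hvp` above.
import numpy as np

DELTA = 0.01   # KL trust-region radius (assumed value)
EPS = 1e-8
CG_ITERS = 10

rng = np.random.default_rng(0)
A = rng.normal(size=(5, 5))
H = A @ A.T + 5 * np.eye(5)          # SPD stand-in for the Fisher matrix
g = rng.normal(size=5)               # stand-in policy gradient

def cg(Ax, b, iters=CG_ITERS):
    """Solve Ax(x) = b starting from x = 0 (same loop as `cg` above)."""
    x = np.zeros_like(b)
    r = b.copy()                      # residual b - Ax(0)
    p = r.copy()
    r_dot_old = r @ r
    for _ in range(iters):
        z = Ax(p)
        alpha = r_dot_old / (p @ z + EPS)
        x += alpha * p
        r -= alpha * z
        r_dot_new = r @ r
        p = r + (r_dot_new / r_dot_old) * p
        r_dot_old = r_dot_new
    return x

x = cg(lambda v: H @ v, g)
print('residual:', np.linalg.norm(H @ x - g))   # near zero after a few iterations
# Scale the direction so the quadratic KL estimate equals DELTA:
alpha = np.sqrt(2 * DELTA / (x @ (H @ x) + EPS))
print('step size alpha:', alpha)
```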
-
-        # Value function updates
-        for _ in range(TRAIN_VF_ITERS):
-            self.train_vf(states, rewards_to_go)
-
-    def finish_path(self, done, next_state):
-        """
-        finish a trajectory
-        :param done: whether the episode is done
-        :param next_state: next state
-        :return: None
-        """
-        if not done:
-            next_state = np.array([next_state], np.float32)
-            last_val = self.critic(next_state)
-        else:
-            last_val = 0
-        self.buf.finish_path(last_val)
-
-
-if __name__ == '__main__':
-    env = gym.make(ENV_ID).unwrapped
-
-    # reproducible
-    np.random.seed(RANDOM_SEED)
-    tf.random.set_seed(RANDOM_SEED)
-    env.seed(RANDOM_SEED)
-
-    state_dim = env.observation_space.shape[0]
-    action_dim = env.action_space.shape[0]
-    action_bound = env.action_space.high
-
-    agent = TRPO(state_dim, action_dim, action_bound)
-
-    t0 = time.time()
-    if args.train:  # train
-        all_episode_reward = []
-        for episode in range(TRAIN_EPISODES):
-            state = env.reset()
-            state = np.array(state, np.float32)
-            episode_reward = 0
-            for step in range(MAX_STEPS):
-                if RENDER:
-                    env.render()
-                action, value, logp, mean, log_std = agent.get_action(state)
-                next_state, reward, done, _ = env.step(action)
-                next_state = np.array(next_state, np.float32)
-                agent.buf.store(state, action, reward, value, logp, mean, log_std)
-                episode_reward += reward
-                state = next_state
-                if agent.buf.is_full():
-                    agent.finish_path(done, next_state)
-                    agent.update()
-                if done:
-                    break
-            agent.finish_path(done, next_state)
-            if episode == 0:
-                all_episode_reward.append(episode_reward)
-            else:
-                all_episode_reward.append(all_episode_reward[-1] * 0.9 + episode_reward * 0.1)
-            print(
-                'Training | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
-                    episode + 1, TRAIN_EPISODES, episode_reward,
-                    time.time() - t0
-                )
-            )
-            if episode % SAVE_FREQ == 0:
-                agent.save()
-        agent.save()
-        plt.plot(all_episode_reward)
-        if not os.path.exists('image'):
-            os.makedirs('image')
-        plt.savefig(os.path.join('image', '_'.join([ALG_NAME, ENV_ID])))
-
-    if args.test:
-        # test
-        agent.load()
-        for episode in range(TEST_EPISODES):
-            state = env.reset()
-            episode_reward = 0
-            for step in range(MAX_STEPS):
-                env.render()
-                action, *_ = agent.get_action(state, greedy=True)
-                state, reward, done, info = env.step(action)
-                episode_reward += reward
-                if done:
-                    break
-            print(
-                'Testing | Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
-                    episode + 1, TEST_EPISODES, episode_reward,
-                    time.time() - t0
-                )
-            )
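For reference, the `GAE_Buffer` used above fills its advantage buffer with Generalized Advantage Estimation (Schulman et al., 2016): TD residuals δ_t = r_t + γV(s_{t+1}) − V(s_t) are discount-summed with factor γλ, while rewards-to-go use γ alone. A small NumPy sketch of that bookkeeping over one finished path (the values are illustrative, and this is not the tutorial's exact API):

```python
# Minimal GAE sketch: discounted cumulative sums over one finished path.
import numpy as np

GAMMA, LAM = 0.99, 0.95

def discount_cumsum(x, discount):
    """y[t] = sum over k >= t of discount**(k - t) * x[k]."""
    y = np.zeros_like(x, dtype=np.float64)
    running = 0.0
    for t in reversed(range(len(x))):
        running = x[t] + discount * running
        y[t] = running
    return y

rewards = np.array([1.0, 0.0, 1.0, 1.0])
values = np.array([0.5, 0.4, 0.6, 0.7])
last_val = 0.3                       # bootstrap value for the cut-off state
vals = np.append(values, last_val)

deltas = rewards + GAMMA * vals[1:] - vals[:-1]       # TD residuals
adv = discount_cumsum(deltas, GAMMA * LAM)            # GAE advantages
ret = discount_cumsum(np.append(rewards, last_val), GAMMA)[:-1]  # rewards-to-go

print(adv)
print(ret)
```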
diff --git a/examples/reinforcement_learning/tutorial_atari_pong.py b/examples/reinforcement_learning/tutorial_atari_pong.py
deleted file mode 100644
index e28f70bec..000000000
--- a/examples/reinforcement_learning/tutorial_atari_pong.py
+++ /dev/null
@@ -1,147 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""Monte-Carlo Policy Network π(a|s) (REINFORCE).
-To understand Reinforcement Learning, we let the computer learn to play
-the Pong game from the raw screen inputs. Before we start, we highly recommend
-going through the famous blog post “Deep Reinforcement Learning: Pong from
-Pixels”, a minimalistic implementation of deep reinforcement learning using
-python-numpy and the OpenAI gym environment.
-The code here reimplements Karpathy's blog post using TensorLayer.
-Compared with Karpathy's code, we store the observations of a whole batch,
-while he stores the observations of only one episode together with the
-gradients (so this version uses more memory if the observations are large).
-
-TODO
------
-- update grads every step rather than storing all observations!
-- tensorlayer@gmail.com
-
-References
-------------
-- http://karpathy.github.io/2016/05/31/rl/
-"""
-import time
-
-import gym
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-# hyper-parameters
-image_size = 80
-D = image_size * image_size
-H = 200
-batch_size = 10
-learning_rate = 1e-4
-gamma = 0.99
-decay_rate = 0.99
-render = False  # display the game environment
-# resume = True  # load existing policy network
-model_file_name = "model_pong"
-np.set_printoptions(threshold=np.inf)
-
-
-def prepro(I):
-    """Preprocess a 210x160x3 uint8 frame into a 6400 (80x80) 1D float vector."""
-    I = I[35:195]
-    I = I[::2, ::2, 0]
-    I[I == 144] = 0
-    I[I == 109] = 0
-    I[I != 0] = 1
-    return I.astype(np.float32).ravel()
-
-
-env = gym.make("Pong-v0")
-observation = env.reset()
-prev_x = None
-running_reward = None
-reward_sum = 0
-episode_number = 0
-
-xs, ys, rs = [], [], []
-
-
-# policy network
-def get_model(inputs_shape):
-    ni = tl.layers.Input(inputs_shape)
-    nn = tl.layers.Dense(n_units=H, act=tf.nn.relu, name='hidden')(ni)
-    nn = tl.layers.Dense(n_units=3, name='output')(nn)
-    M = tl.models.Model(inputs=ni, outputs=nn, name="mlp")
-    return M
-
-
-model = get_model([None, D])
-train_weights = model.trainable_weights
-
-# rho is RMSProp's moving-average decay of squared gradients (Karpathy's decay_rate)
-optimizer = tf.optimizers.RMSprop(learning_rate=learning_rate, rho=decay_rate)
-
-model.train()  # set model to train mode (in case you add dropout into the model)
-
-start_time = time.time()
-game_number = 0
-while True:
-    if render:
-        env.render()
-
-    cur_x = prepro(observation)
-    x = cur_x - prev_x if prev_x is not None else np.zeros(D, dtype=np.float32)
-    x = x.reshape(1, D)
-    prev_x = cur_x
-
-    _prob = model(x)
-    prob = tf.nn.softmax(_prob)
-
-    # action. 1: STOP 2: UP 3: DOWN
-    # action = np.random.choice([1,2,3], p=prob.flatten())
-    # action = tl.rein.choice_action_by_probs(prob.flatten(), [1, 2, 3])
-    action = tl.rein.choice_action_by_probs(prob[0].numpy(), [1, 2, 3])
-
-    observation, reward, done, _ = env.step(action)
-    reward_sum += reward
-    xs.append(x)  # all observations in an episode
-    ys.append(action - 1)  # all fake labels in an episode (actions start from 1, so minus 1)
-    rs.append(reward)  # all rewards in an episode
-
-    if done:
-        episode_number += 1
-        game_number = 0
-
-        if episode_number % batch_size == 0:
-            print('batch over...... updating parameters......')
-            epx = np.vstack(xs)
-            epy = np.asarray(ys)
-            epr = np.asarray(rs)
-            disR = tl.rein.discount_episode_rewards(epr, gamma)
-            disR -= np.mean(disR)
-            disR /= np.std(disR)
-
-            xs, ys, rs = [], [], []
-
-            with tf.GradientTape() as tape:
-                _prob = model(epx)
-                _loss = tl.rein.cross_entropy_reward_loss(_prob, epy, disR)
-            grad = tape.gradient(_loss, train_weights)
-            optimizer.apply_gradients(zip(grad, train_weights))
-
-        ## TODO
-        # if episode_number % (batch_size * 100) == 0:
-        #     tl.files.save_npz(network.all_params, name=model_file_name + '.npz')
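The discount-and-normalize block above is what turns raw ±1 Pong rewards into a zero-mean, unit-variance learning signal. A quick NumPy equivalent of that transformation (illustrative reward sequence; assumes the default game-boundary reset behaviour of `tl.rein.discount_episode_rewards`):

```python
# What tl.rein.discount_episode_rewards + normalization compute, in plain NumPy.
import numpy as np

gamma = 0.99
epr = np.array([0., 0., 0., 1., 0., 0., -1.], dtype=np.float32)

disR = np.zeros_like(epr)
running = 0.
for t in reversed(range(len(epr))):
    if epr[t] != 0:
        running = 0.  # Pong-specific: a non-zero reward marks a game boundary
    running = running * gamma + epr[t]
    disR[t] = running

disR -= disR.mean()   # the two normalization lines from the tutorial
disR /= disR.std()
print(disR)           # zero-mean, unit-variance discounted returns
```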
-
-        running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
-        print('resetting env. episode reward total was {}. running mean: {}'.format(reward_sum, running_reward))
-        reward_sum = 0
-        observation = env.reset()  # reset env
-        prev_x = None
-
-    if reward != 0:
-        print(
-            (
-                'episode %d: game %d took %.5fs, reward: %f' %
-                (episode_number, game_number, time.time() - start_time, reward)
-            ), ('' if reward == -1 else ' !!!!!!!!')
-        )
-        start_time = time.time()
-        game_number += 1
diff --git a/examples/reinforcement_learning/tutorial_format.py b/examples/reinforcement_learning/tutorial_format.py
deleted file mode 100644
index cd27ef2c4..000000000
--- a/examples/reinforcement_learning/tutorial_format.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# the format of tutorial algorithms #
-# please annotate the code heavily #
-'''
-Algorithm Name
-------------------------
-Briefly describe the algorithm and add some details.
-
-Reference
----------
-original paper: e.g. https://arxiv.org/pdf/1802.09477.pdf
-website: ...
-
-
-Environment
------------
-e.g. OpenAI Gym Pendulum-v0, continuous action space
-
-Prerequisites
----------------
-tensorflow >=2.0.0a0
-tensorlayer >=2.0.0
-...
-
-To run
--------
-python tutorial_***.py --train/test
-
-'''
-
-import argparse
-import time
-
-import numpy as np
-import tensorflow as tf
-
-# import 'other package name'
-
-np.random.seed(2)
-tf.random.set_seed(2)  # reproducible
-
-# add arguments in command --train/test
-parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
-parser.add_argument('--train', dest='train', action='store_true', default=False)
-parser.add_argument('--test', dest='test', action='store_true', default=True)
-args = parser.parse_args()
-
-#####################  hyper parameters  ####################
-A = a  # description of hyper parameter
-B = b  # description of hyper parameter
-
-###############################  Algorithm Name  ####################################
-
-
-class C():  # algorithm-specific classes
-    ''' description of class '''
-
-    def C1():
-        ''' description of function '''
-
-
-def D():  # some common functions, could be extracted into utils afterwards
-    ''' description of function '''
-
-
-if __name__ == '__main__':
-    '''initialization of env, buffer, networks in algorithms'''
-    env = 'env model'
-    buffer = 'buffer model'
-    network1 = 'network model1'
-    network2 = 'network model2'
-
-    # training loop
-    if args.train:
-        t0 = time.time()
-        while NOT_FINISHED:  # loop of episodes
-            while NOT_DONE:  # loop of steps in episode
-                ''' step '''
-                ''' train '''
-
-            print('Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'\
-                  .format(episode, all_episodes, episode_reward, time.time() - t0))
-        ''' plot, following the format of ./baselines/utils/plot() '''
-        plot(rewards, Algorithm_name='SAC', Env_name='Pendulum-v0')
-        ''' save weights, implemented in the classes defined above, following the format of ./baselines/utils/save_model() '''
-        model.save_weights()
-
-    # testing loop
-    if args.test:
-        t0 = time.time()
-        ''' load weights, implemented in the classes defined above, following the format of ./baselines/utils/load_model() '''
-        model.load_weights()
-
-        while NOT_FINISHED:  # loop of episodes
-            while NOT_DONE:  # loop of steps in episode
-                ''' step '''
-
-            print('Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'\
-                  .format(episode, all_episodes, episode_reward, time.time() - t0))
diff --git a/examples/reinforcement_learning/tutorial_prioritized_replay.py b/examples/reinforcement_learning/tutorial_prioritized_replay.py
deleted file mode 100644
index f2c5745fd..000000000
--- a/examples/reinforcement_learning/tutorial_prioritized_replay.py
+++ /dev/null
@@ -1,527 +0,0 @@
-"""
-Prioritized Experience Replay
-------------------------
-Prioritized experience replay is an efficient replay method that replays
-important transitions more frequently. A segment tree data structure is used
-to speed up indexing.
-Reference:
-------------------------
-Schaul T, Quan J, Antonoglou I, et al. Prioritized experience replay[J]. arXiv
-preprint arXiv:1511.05952, 2015.
-Dhariwal P, Hesse C, Klimov O, et al. OpenAI Baselines (2017)[J]. URL
-https://github.com/openai/baselines.
-Environment:
-------------------------
-Cartpole and Pong in OpenAI Gym
-Requirements:
-------------------------
-tensorflow>=2.0.0a0
-tensorlayer>=2.0.0
-To run:
-------------------------
-python tutorial_prioritized_replay.py --mode=train
-python tutorial_prioritized_replay.py --mode=test --save_path=per/8000.npz
-"""
-import argparse
-import operator
-import os
-import random
-import time
-
-import gym
-import matplotlib.pyplot as plt
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-
-parser = argparse.ArgumentParser()
-# add arguments in command --train/test
-parser.add_argument('--train', dest='train', action='store_true', default=True)
-parser.add_argument('--test', dest='test', action='store_true', default=True)
-parser.add_argument(
-    '--save_path', default=None, help='folder to save to if mode == train, else model path; '
-    'the qnet will be saved once the target net updates'
-)
-parser.add_argument('--seed', help='random seed', type=int, default=0)
-parser.add_argument('--env_id', default='CartPole-v0', help='CartPole-v0 or PongNoFrameskip-v4')
-args = parser.parse_args()
-
-random.seed(args.seed)
-np.random.seed(args.seed)
-tf.random.set_seed(args.seed)  # reproducible
-env_id = args.env_id
-env = gym.make(env_id)
-env.seed(args.seed)
-alg_name = 'prioritized_replay'
-
-# ####################  hyper parameters  ####################
-if env_id == 'CartPole-v0':
-    qnet_type = 'MLP'
-    number_timesteps = 10000  # total number of time steps to train on
-    explore_timesteps = 100
-    # epsilon-greedy schedule, final exploit prob is 0.99
-    epsilon = lambda i_iter: 1 - 0.99 * min(1, i_iter / explore_timesteps)
-    lr = 5e-3  # learning rate
-    buffer_size = 1000  # replay buffer size
-    target_q_update_freq = 50  # how frequently the target q net is updated
-    ob_scale = 1.0  # scale observations
-    clipnorm = None
-else:
-    # reward will increase noticeably after 1e5 time steps
-    qnet_type = 'CNN'
-    number_timesteps = int(1e6)  # total number of time steps to train on
-    explore_timesteps = 1e5
-    # epsilon-greedy schedule, final exploit prob is 0.99
-    epsilon = lambda i_iter: 1 - 0.99 * min(1, i_iter / explore_timesteps)
-    lr = 1e-4  # learning rate
-    buffer_size = 10000  # replay buffer size
-    target_q_update_freq = 200  # how frequently the target q net is updated
-    ob_scale = 1.0 / 255  # scale observations
-    clipnorm = 10
-
-in_dim = env.observation_space.shape
-out_dim = env.action_space.n
-reward_gamma = 0.99  # reward discount
-batch_size = 32  # batch size for sampling from replay buffer
-warm_start = buffer_size / 10  # number of samples to collect before learning starts
-prioritized_replay_alpha = 0.6  # alpha in PER
-prioritized_replay_beta0 = 0.4  # initial beta in PER
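With α and β defined, a transition's sampling probability and importance weight follow Schaul et al.: P(i) = p_i^α / Σ_k p_k^α and w_i = (N · P(i))^(−β), normalized by the largest weight. This is exactly what `PrioritizedReplayBuffer.sample` below computes with segment trees; a tiny NumPy illustration of the same arithmetic (toy priorities):

```python
# PER sampling probabilities and importance-sampling weights (toy example).
import numpy as np

alpha, beta = 0.6, 0.4
priorities = np.array([2.0, 1.0, 0.5, 0.1])   # |TD error|-based priorities

probs = priorities**alpha / np.sum(priorities**alpha)   # P(i)
N = len(priorities)
weights = (N * probs)**(-beta)
weights /= weights.max()       # normalize so the largest weight is 1

print(probs.round(3))    # high-priority transitions are sampled more often...
print(weights.round(3))  # ...but down-weighted in the loss to reduce bias
```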
-
-
-# ##############################  Network  ####################################
-class MLP(tl.models.Model):
-
-    def __init__(self, name):
-        super(MLP, self).__init__(name=name)
-        self.h1 = tl.layers.Dense(64, tf.nn.tanh, in_channels=in_dim[0])
-        self.qvalue = tl.layers.Dense(out_dim, in_channels=64, name='q', W_init=tf.initializers.GlorotUniform())
-
-    def forward(self, ni):
-        return self.qvalue(self.h1(ni))
-
-
-class CNN(tl.models.Model):
-
-    def __init__(self, name):
-        super(CNN, self).__init__(name=name)
-        h, w, in_channels = in_dim
-        dense_in_channels = 64 * ((h - 28) // 8) * ((w - 28) // 8)
-        self.conv1 = tl.layers.Conv2d(
-            32, (8, 8), (4, 4), tf.nn.relu, 'VALID', in_channels=in_channels, name='conv2d_1',
-            W_init=tf.initializers.GlorotUniform()
-        )
-        self.conv2 = tl.layers.Conv2d(
-            64, (4, 4), (2, 2), tf.nn.relu, 'VALID', in_channels=32, name='conv2d_2',
-            W_init=tf.initializers.GlorotUniform()
-        )
-        self.conv3 = tl.layers.Conv2d(
-            64, (3, 3), (1, 1), tf.nn.relu, 'VALID', in_channels=64, name='conv2d_3',
-            W_init=tf.initializers.GlorotUniform()
-        )
-        self.flatten = tl.layers.Flatten(name='flatten')
-        self.preq = tl.layers.Dense(
-            256, tf.nn.relu, in_channels=dense_in_channels, name='pre_q', W_init=tf.initializers.GlorotUniform()
-        )
-        self.qvalue = tl.layers.Dense(out_dim, in_channels=256, name='q', W_init=tf.initializers.GlorotUniform())
-
-    def forward(self, ni):
-        feature = self.flatten(self.conv3(self.conv2(self.conv1(ni))))
-        return self.qvalue(self.preq(feature))
-
-
-# ##############################  Replay  ####################################
-class SegmentTree(object):
-
-    def __init__(self, capacity, operation, neutral_element):
-        """Build a Segment Tree data structure.
-        https://en.wikipedia.org/wiki/Segment_tree
-        Can be used as a regular array, but with two
-        important differences:
-            a) setting an item's value is slightly slower:
-               it is O(log capacity) instead of O(1).
-            b) the user has access to an efficient ( O(log segment size) )
-               `reduce` operation which reduces `operation` over
-               a contiguous subsequence of items in the array.
-        Parameters
-        ----------
-        capacity: int
-            Total size of the array - must be a power of two.
-        operation: lambda obj, obj -> obj
-            an operation for combining elements (e.g. sum, max);
-            must form a mathematical group together with the set of
-            possible values for array elements (i.e. be associative)
-        neutral_element: obj
-            neutral element for the operation above, e.g. float('-inf')
-            for max and 0 for sum.
-        """
-        assert capacity > 0 and capacity & (capacity - 1) == 0, \
-            "capacity must be positive and a power of 2."
-        self._capacity = capacity
-        self._value = [neutral_element for _ in range(2 * capacity)]
-        self._operation = operation
-
-    def _reduce_helper(self, start, end, node, node_start, node_end):
-        if start == node_start and end == node_end:
-            return self._value[node]
-        mid = (node_start + node_end) // 2
-        if end <= mid:
-            return self._reduce_helper(start, end, 2 * node, node_start, mid)
-        else:
-            if mid + 1 <= start:
-                return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
-            else:
-                return self._operation(
-                    self._reduce_helper(start, mid, 2 * node, node_start, mid),
-                    self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
-                )
-
-    def reduce(self, start=0, end=None):
-        """Returns the result of applying `self.operation`
-        to a contiguous subsequence of the array.
-        Parameters
-        ----------
-        start: int
-            beginning of the subsequence
-        end: int
-            end of the subsequence
-        Returns
-        -------
-        reduced: obj
-            result of reducing self.operation over the specified range of the array.
-        """
-        if end is None:
-            end = self._capacity
-        if end < 0:
-            end += self._capacity
-        end -= 1
-        return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
-
-    def __setitem__(self, idx, val):
-        # index of the leaf
-        idx += self._capacity
-        self._value[idx] = val
-        idx //= 2
-        while idx >= 1:
-            self._value[idx] = self._operation(self._value[2 * idx], self._value[2 * idx + 1])
-            idx //= 2
-
-    def __getitem__(self, idx):
-        assert 0 <= idx < self._capacity
-        return self._value[self._capacity + idx]
-
-
-class SumSegmentTree(SegmentTree):
-
-    def __init__(self, capacity):
-        super(SumSegmentTree, self).__init__(capacity=capacity, operation=operator.add, neutral_element=0.0)
-
-    def sum(self, start=0, end=None):
-        """Returns arr[start] + ... + arr[end]"""
-        return super(SumSegmentTree, self).reduce(start, end)
-
-    def find_prefixsum_idx(self, prefixsum):
-        """Find the highest index `i` in the array such that
-        sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum.
-        If the array values are probabilities, this function
-        allows sampling indexes according to the discrete
-        probability efficiently.
-        Parameters
-        ----------
-        prefixsum: float
-            upper bound on the sum of the array prefix
-        Returns
-        -------
-        idx: int
-            highest index satisfying the prefixsum constraint
-        """
-        assert 0 <= prefixsum <= self.sum() + 1e-5
-        idx = 1
-        while idx < self._capacity:  # while non-leaf
-            if self._value[2 * idx] > prefixsum:
-                idx = 2 * idx
-            else:
-                prefixsum -= self._value[2 * idx]
-                idx = 2 * idx + 1
-        return idx - self._capacity
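`find_prefixsum_idx` is what makes proportional sampling O(log n): draw a uniform mass in [0, Σp) and walk down the tree. A quick demonstration, assuming the `SumSegmentTree` class above is in scope (this file is a standalone tutorial, not an importable package):

```python
# Proportional sampling demo with the SumSegmentTree defined above.
import random

tree = SumSegmentTree(capacity=8)            # capacity must be a power of two
for i, p in enumerate([1.0, 2.0, 4.0, 8.0]):
    tree[i] = p                              # leaf priorities; total is 15

counts = [0] * 4
random.seed(0)
for _ in range(15000):
    mass = random.random() * tree.sum()      # uniform in [0, total priority)
    counts[tree.find_prefixsum_idx(mass)] += 1

print(counts)  # roughly proportional to 1 : 2 : 4 : 8
```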
-
-
-class MinSegmentTree(SegmentTree):
-
-    def __init__(self, capacity):
-        super(MinSegmentTree, self).__init__(capacity=capacity, operation=min, neutral_element=float('inf'))
-
-    def min(self, start=0, end=None):
-        """Returns min(arr[start], ..., arr[end])"""
-
-        return super(MinSegmentTree, self).reduce(start, end)
-
-
-class ReplayBuffer(object):
-
-    def __init__(self, size):
-        self._storage = []
-        self._maxsize = size
-        self._next_idx = 0
-
-    def __len__(self):
-        return len(self._storage)
-
-    def add(self, *args):
-        if self._next_idx >= len(self._storage):
-            self._storage.append(args)
-        else:
-            self._storage[self._next_idx] = args
-        self._next_idx = (self._next_idx + 1) % self._maxsize
-
-    def _encode_sample(self, idxes):
-        b_o, b_a, b_r, b_o_, b_d = [], [], [], [], []
-        for i in idxes:
-            o, a, r, o_, d = self._storage[i]
-            b_o.append(o)
-            b_a.append(a)
-            b_r.append(r)
-            b_o_.append(o_)
-            b_d.append(d)
-        return (
-            np.stack(b_o).astype('float32') * ob_scale,
-            np.stack(b_a).astype('int32'),
-            np.stack(b_r).astype('float32'),
-            np.stack(b_o_).astype('float32') * ob_scale,
-            np.stack(b_d).astype('float32'),
-        )
-
-    def sample(self, batch_size):
-        indexes = range(len(self._storage))
-        idxes = [random.choice(indexes) for _ in range(batch_size)]
-        return self._encode_sample(idxes)
-
-
-class PrioritizedReplayBuffer(ReplayBuffer):
-
-    def __init__(self, size, alpha, beta):
-        """Create a Prioritized Replay buffer.
-        Parameters
-        ----------
-        size: int
-            Max number of transitions to store in the buffer. When the buffer
-            overflows, the oldest memories are dropped.
- alpha: float - how much prioritization is used - (0 - no prioritization, 1 - full prioritization) - See Also - -------- - ReplayBuffer.__init__ - """ - super(PrioritizedReplayBuffer, self).__init__(size) - assert alpha >= 0 - self._alpha = alpha - - it_capacity = 1 - while it_capacity < size: - it_capacity *= 2 - - self._it_sum = SumSegmentTree(it_capacity) - self._it_min = MinSegmentTree(it_capacity) - self._max_priority = 1.0 - self.beta = beta - - def add(self, *args): - """See ReplayBuffer.store_effect""" - idx = self._next_idx - super().add(*args) - self._it_sum[idx] = self._max_priority**self._alpha - self._it_min[idx] = self._max_priority**self._alpha - - def _sample_proportional(self, batch_size): - res = [] - p_total = self._it_sum.sum(0, len(self._storage) - 1) - every_range_len = p_total / batch_size - for i in range(batch_size): - mass = random.random() * every_range_len + i * every_range_len - idx = self._it_sum.find_prefixsum_idx(mass) - res.append(idx) - return res - - def sample(self, batch_size): - """Sample a batch of experiences""" - idxes = self._sample_proportional(batch_size) - - it_sum = self._it_sum.sum() - p_min = self._it_min.min() / it_sum - max_weight = (p_min * len(self._storage))**(-self.beta) - - p_samples = np.asarray([self._it_sum[idx] for idx in idxes]) / it_sum - weights = (p_samples * len(self._storage))**(-self.beta) / max_weight - encoded_sample = self._encode_sample(idxes) - return encoded_sample + (weights.astype('float32'), idxes) - - def update_priorities(self, idxes, priorities): - """Update priorities of sampled transitions""" - assert len(idxes) == len(priorities) - for idx, priority in zip(idxes, priorities): - assert priority > 0 - assert 0 <= idx < len(self._storage) - self._it_sum[idx] = priority**self._alpha - self._it_min[idx] = priority**self._alpha - - self._max_priority = max(self._max_priority, priority) - - -# ############################# Functions ################################### -def huber_loss(x): - """Loss function for value""" - return tf.where(tf.abs(x) < 1, tf.square(x) * 0.5, tf.abs(x) - 0.5) - - -def sync(net, net_tar): - """Copy q network to target q network""" - for var, var_tar in zip(net.trainable_weights, net_tar.trainable_weights): - var_tar.assign(var) - - -# ############################### DQN ##################################### -class DQN(object): - - def __init__(self): - model = MLP if qnet_type == 'MLP' else CNN - self.qnet = model('q') - if args.train: - self.qnet.train() - self.targetqnet = model('targetq') - self.targetqnet.infer() - sync(self.qnet, self.targetqnet) - else: - self.qnet.infer() - self.load(args.save_path) - self.niter = 0 - if clipnorm is not None: - self.optimizer = tf.optimizers.Adam(learning_rate=lr, clipnorm=clipnorm) - else: - self.optimizer = tf.optimizers.Adam(learning_rate=lr) - - def get_action(self, obv): - eps = epsilon(self.niter) - if args.train and random.random() < eps: - return int(random.random() * out_dim) - else: - obv = np.expand_dims(obv, 0).astype('float32') * ob_scale - return self._qvalues_func(obv).numpy().argmax(1)[0] - - @tf.function - def _qvalues_func(self, obv): - return self.qnet(obv) - - def train(self, b_o, b_a, b_r, b_o_, b_d, weights=None): - if weights is None: - weights = np.ones_like(b_r) - td_errors = self._train_func(b_o, b_a, b_r, b_o_, b_d, weights) - - self.niter += 1 - if self.niter % target_q_update_freq == 0: - sync(self.qnet, self.targetqnet) - self.save(args.save_path) - return td_errors.numpy() - - def save(self, path): - if path is None: 
- path = os.path.join('model', '_'.join([alg_name, env_id])) - if not os.path.exists(path): - os.makedirs(path) - tl.files.save_weights_to_hdf5(os.path.join(path, 'q_net.hdf5'), self.qnet) - - def load(self, path): - if path is None: - path = os.path.join('model', '_'.join([alg_name, env_id])) - tl.files.load_hdf5_to_weights_in_order(os.path.join(path, 'q_net.hdf5'), self.qnet) - - @tf.function - def _train_func(self, b_o, b_a, b_r, b_o_, b_d, weights): - with tf.GradientTape() as tape: - td_errors = self._tderror_func(b_o, b_a, b_r, b_o_, b_d) - loss = tf.reduce_mean(huber_loss(td_errors) * weights) - - grad = tape.gradient(loss, self.qnet.trainable_weights) - self.optimizer.apply_gradients(zip(grad, self.qnet.trainable_weights)) - - return td_errors - - @tf.function - def _tderror_func(self, b_o, b_a, b_r, b_o_, b_d): - b_q_ = (1 - b_d) * tf.reduce_max(self.targetqnet(b_o_), 1) - b_q = tf.reduce_sum(self.qnet(b_o) * tf.one_hot(b_a, out_dim), 1) - return b_q - (b_r + reward_gamma * b_q_) - - -# ############################# Trainer ################################### -if __name__ == '__main__': - dqn = DQN() - t0 = time.time() - if args.train: - buffer = PrioritizedReplayBuffer(buffer_size, prioritized_replay_alpha, prioritized_replay_beta0) - nepisode = 0 - all_episode_reward = [] - for i in range(1, number_timesteps + 1): - o = env.reset() - episode_reward = 0 - while True: - buffer.beta += (1 - prioritized_replay_beta0) / number_timesteps - - a = dqn.get_action(o) - - # execute action and feed to replay buffer - # note that `_` tail in var name means next - o_, r, done, info = env.step(a) - buffer.add(o, a, r, o_, done) - episode_reward += r - - if i >= warm_start: - *transitions, idxs = buffer.sample(batch_size) - priorities = dqn.train(*transitions) - priorities = np.clip(np.abs(priorities), 1e-6, None) - buffer.update_priorities(idxs, priorities) - - if done: - break - else: - o = o_ - - if nepisode == 0: - all_episode_reward.append(episode_reward) - else: - all_episode_reward.append(all_episode_reward[-1] * 0.9 + episode_reward * 0.1) - nepisode += 1 - print( - 'Training | Episode: {} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - nepisode, episode_reward, - time.time() - t0 - ) - ) # episode num starts from 1 in print - - dqn.save(args.save_path) - plt.plot(all_episode_reward) - if not os.path.exists('image'): - os.makedirs('image') - plt.savefig(os.path.join('image', '_'.join([alg_name, env_id]))) - - if args.test: - nepisode = 0 - for i in range(1, number_timesteps + 1): - o = env.reset() - episode_reward = 0 - while True: - env.render() - a = dqn.get_action(o) - o_, r, done, info = env.step(a) - episode_reward += r - if done: - break - else: - o = o_ - nepisode += 1 - print( - 'Testing | Episode: {} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format( - nepisode, episode_reward, - time.time() - t0 - ) - ) diff --git a/examples/reinforcement_learning/tutorial_wrappers.py b/examples/reinforcement_learning/tutorial_wrappers.py deleted file mode 100644 index c7395f063..000000000 --- a/examples/reinforcement_learning/tutorial_wrappers.py +++ /dev/null @@ -1,563 +0,0 @@ -"""Env wrappers -Note that this file is adapted from `https://pypi.org/project/gym-vec-env` and -`https://github.com/openai/baselines/blob/master/baselines/common/*wrappers.py` -""" -from collections import deque -from functools import partial -from multiprocessing import Pipe, Process, cpu_count -from sys import platform - -import cv2 -import gym -import numpy as np -from gym import spaces - -__all__ 
= ( - 'build_env', # build env - 'TimeLimit', # Time limit wrapper - 'NoopResetEnv', # Run random number of no-ops on reset - 'FireResetEnv', # Reset wrapper for envs with fire action - 'EpisodicLifeEnv', # end-of-life == end-of-episode wrapper - 'MaxAndSkipEnv', # skip frame wrapper - 'ClipRewardEnv', # clip reward wrapper - 'WarpFrame', # warp observation wrapper - 'FrameStack', # stack frame wrapper - 'LazyFrames', # lazy store wrapper - 'RewardScaler', # reward scale - 'SubprocVecEnv', # vectorized env wrapper - 'VecFrameStack', # stack frames in vectorized env - 'Monitor', # Episode reward and length monitor -) -cv2.ocl.setUseOpenCL(False) -# env_id -> env_type -id2type = dict() -for _env in gym.envs.registry.all(): - id2type[_env.id] = _env._entry_point.split(':')[0].rsplit('.', 1)[1] - - -def build_env(env_id, vectorized=False, seed=0, reward_scale=1.0, nenv=0): - """Build env based on options""" - env_type = id2type[env_id] - nenv = nenv or cpu_count() // (1 + (platform == 'darwin')) - stack = env_type == 'atari' - if not vectorized: - env = _make_env(env_id, env_type, seed, reward_scale, stack) - else: - env = _make_vec_env(env_id, env_type, nenv, seed, reward_scale, stack) - - return env - - -def _make_env(env_id, env_type, seed, reward_scale, frame_stack=True): - """Make single env""" - if env_type == 'atari': - env = gym.make(env_id) - assert 'NoFrameskip' in env.spec.id - env = NoopResetEnv(env, noop_max=30) - env = MaxAndSkipEnv(env, skip=4) - env = Monitor(env) - # deepmind wrap - env = EpisodicLifeEnv(env) - if 'FIRE' in env.unwrapped.get_action_meanings(): - env = FireResetEnv(env) - env = WarpFrame(env) - env = ClipRewardEnv(env) - if frame_stack: - env = FrameStack(env, 4) - elif env_type == 'classic_control': - env = Monitor(gym.make(env_id)) - else: - raise NotImplementedError - if reward_scale != 1: - env = RewardScaler(env, reward_scale) - env.seed(seed) - return env - - -def _make_vec_env(env_id, env_type, nenv, seed, reward_scale, frame_stack=True): - """Make vectorized env""" - env = SubprocVecEnv([partial(_make_env, env_id, env_type, seed + i, reward_scale, False) for i in range(nenv)]) - if frame_stack: - env = VecFrameStack(env, 4) - return env - - -class TimeLimit(gym.Wrapper): - - def __init__(self, env, max_episode_steps=None): - super(TimeLimit, self).__init__(env) - self._max_episode_steps = max_episode_steps - self._elapsed_steps = 0 - - def step(self, ac): - observation, reward, done, info = self.env.step(ac) - self._elapsed_steps += 1 - if self._elapsed_steps >= self._max_episode_steps: - done = True - info['TimeLimit.truncated'] = True - return observation, reward, done, info - - def reset(self, **kwargs): - self._elapsed_steps = 0 - return self.env.reset(**kwargs) - - -class NoopResetEnv(gym.Wrapper): - - def __init__(self, env, noop_max=30): - """Sample initial states by taking random number of no-ops on reset. - No-op is assumed to be action 0. 
- """ - super(NoopResetEnv, self).__init__(env) - self.noop_max = noop_max - self.override_num_noops = None - self.noop_action = 0 - assert env.unwrapped.get_action_meanings()[0] == 'NOOP' - - def reset(self, **kwargs): - """ Do no-op action for a number of steps in [1, noop_max].""" - self.env.reset(**kwargs) - if self.override_num_noops is not None: - noops = self.override_num_noops - else: - noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) - assert noops > 0 - obs = None - for _ in range(noops): - obs, _, done, _ = self.env.step(self.noop_action) - if done: - obs = self.env.reset(**kwargs) - return obs - - def step(self, ac): - return self.env.step(ac) - - -class FireResetEnv(gym.Wrapper): - - def __init__(self, env): - """Take action on reset for environments that are fixed until firing.""" - super(FireResetEnv, self).__init__(env) - assert env.unwrapped.get_action_meanings()[1] == 'FIRE' - assert len(env.unwrapped.get_action_meanings()) >= 3 - - def reset(self, **kwargs): - self.env.reset(**kwargs) - obs, _, done, _ = self.env.step(1) - if done: - self.env.reset(**kwargs) - obs, _, done, _ = self.env.step(2) - if done: - self.env.reset(**kwargs) - return obs - - def step(self, ac): - return self.env.step(ac) - - -class EpisodicLifeEnv(gym.Wrapper): - - def __init__(self, env): - """Make end-of-life == end-of-episode, but only reset on true game over. - Done by DeepMind for the DQN and co. since it helps value estimation. - """ - super(EpisodicLifeEnv, self).__init__(env) - self.lives = 0 - self.was_real_done = True - - def step(self, action): - obs, reward, done, info = self.env.step(action) - self.was_real_done = done - # check current lives, make loss of life terminal, - # then update lives to handle bonus lives - lives = self.env.unwrapped.ale.lives() - if 0 < lives < self.lives: - # for Qbert sometimes we stay in lives == 0 condition for a few - # frames so it's important to keep lives > 0, so that we only reset - # once the environment advertises done. - done = True - self.lives = lives - return obs, reward, done, info - - def reset(self, **kwargs): - """Reset only when lives are exhausted. - This way all states are still reachable even though lives are episodic, - and the learner need not know about any of this behind-the-scenes. 
- """ - if self.was_real_done: - obs = self.env.reset(**kwargs) - else: - # no-op step to advance from terminal/lost life state - obs, _, _, _ = self.env.step(0) - self.lives = self.env.unwrapped.ale.lives() - return obs - - -class MaxAndSkipEnv(gym.Wrapper): - - def __init__(self, env, skip=4): - """Return only every `skip`-th frame""" - super(MaxAndSkipEnv, self).__init__(env) - # most recent raw observations (for max pooling across time steps) - shape = (2, ) + env.observation_space.shape - self._obs_buffer = np.zeros(shape, dtype=np.uint8) - self._skip = skip - - def step(self, action): - """Repeat action, sum reward, and max over last observations.""" - total_reward = 0.0 - done = info = None - for i in range(self._skip): - obs, reward, done, info = self.env.step(action) - if i == self._skip - 2: - self._obs_buffer[0] = obs - if i == self._skip - 1: - self._obs_buffer[1] = obs - total_reward += reward - if done: - break - # Note that the observation on the done=True frame doesn't matter - max_frame = self._obs_buffer.max(axis=0) - - return max_frame, total_reward, done, info - - def reset(self, **kwargs): - return self.env.reset(**kwargs) - - -class ClipRewardEnv(gym.RewardWrapper): - - def __init__(self, env): - super(ClipRewardEnv, self).__init__(env) - - def reward(self, reward): - """Bin reward to {+1, 0, -1} by its sign.""" - return np.sign(reward) - - -class WarpFrame(gym.ObservationWrapper): - - def __init__(self, env, width=84, height=84, grayscale=True): - """Warp frames to 84x84 as done in the Nature paper and later work.""" - super(WarpFrame, self).__init__(env) - self.width = width - self.height = height - self.grayscale = grayscale - shape = (self.height, self.width, 1 if self.grayscale else 3) - self.observation_space = spaces.Box(low=0, high=255, shape=shape, dtype=np.uint8) - - def observation(self, frame): - if self.grayscale: - frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) - size = (self.width, self.height) - frame = cv2.resize(frame, size, interpolation=cv2.INTER_AREA) - if self.grayscale: - frame = np.expand_dims(frame, -1) - return frame - - -class FrameStack(gym.Wrapper): - - def __init__(self, env, k): - """Stack k last frames. - Returns lazy array, which is much more memory efficient. - See Also `LazyFrames` - """ - super(FrameStack, self).__init__(env) - self.k = k - self.frames = deque([], maxlen=k) - shp = env.observation_space.shape - shape = shp[:-1] + (shp[-1] * k, ) - self.observation_space = spaces.Box(low=0, high=255, shape=shape, dtype=env.observation_space.dtype) - - def reset(self): - ob = self.env.reset() - for _ in range(self.k): - self.frames.append(ob) - return np.asarray(self._get_ob()) - - def step(self, action): - ob, reward, done, info = self.env.step(action) - self.frames.append(ob) - return np.asarray(self._get_ob()), reward, done, info - - def _get_ob(self): - assert len(self.frames) == self.k - return LazyFrames(list(self.frames)) - - -class LazyFrames(object): - - def __init__(self, frames): - """This object ensures that common frames between the observations are - only stored once. It exists purely to optimize memory usage which can be - huge for DQN's 1M frames replay buffers. - - This object should only be converted to numpy array before being passed - to the model. You'd not believe how complex the previous solution was. 
- """ - self._frames = frames - self._out = None - - def _force(self): - if self._out is None: - self._out = np.concatenate(self._frames, axis=-1) - self._frames = None - return self._out - - def __array__(self, dtype=None): - out = self._force() - if dtype is not None: - out = out.astype(dtype) - return out - - def __len__(self): - return len(self._force()) - - def __getitem__(self, i): - return self._force()[i] - - -class RewardScaler(gym.RewardWrapper): - """Bring rewards to a reasonable scale for PPO. - This is incredibly important and effects performance drastically. - """ - - def __init__(self, env, scale=0.01): - super(RewardScaler, self).__init__(env) - self.scale = scale - - def reward(self, reward): - return reward * self.scale - - -class VecFrameStack(object): - - def __init__(self, env, k): - self.env = env - self.k = k - self.action_space = env.action_space - self.frames = deque([], maxlen=k) - shp = env.observation_space.shape - shape = shp[:-1] + (shp[-1] * k, ) - self.observation_space = spaces.Box(low=0, high=255, shape=shape, dtype=env.observation_space.dtype) - - def reset(self): - ob = self.env.reset() - for _ in range(self.k): - self.frames.append(ob) - return np.asarray(self._get_ob()) - - def step(self, action): - ob, reward, done, info = self.env.step(action) - self.frames.append(ob) - return np.asarray(self._get_ob()), reward, done, info - - def _get_ob(self): - assert len(self.frames) == self.k - return LazyFrames(list(self.frames)) - - -def _worker(remote, parent_remote, env_fn_wrapper): - parent_remote.close() - env = env_fn_wrapper.x() - while True: - cmd, data = remote.recv() - if cmd == 'step': - ob, reward, done, info = env.step(data) - if done: - ob = env.reset() - remote.send((ob, reward, done, info)) - elif cmd == 'reset': - ob = env.reset() - remote.send(ob) - elif cmd == 'reset_task': - ob = env._reset_task() - remote.send(ob) - elif cmd == 'close': - remote.close() - break - elif cmd == 'get_spaces': - remote.send((env.observation_space, env.action_space)) - else: - raise NotImplementedError - - -class CloudpickleWrapper(object): - """ - Uses cloudpickle to serialize contents - """ - - def __init__(self, x): - self.x = x - - def __getstate__(self): - import cloudpickle - return cloudpickle.dumps(self.x) - - def __setstate__(self, ob): - import pickle - self.x = pickle.loads(ob) - - -class SubprocVecEnv(object): - - def __init__(self, env_fns): - """ - envs: list of gym environments to run in subprocesses - """ - self.num_envs = len(env_fns) - - self.waiting = False - self.closed = False - nenvs = len(env_fns) - self.nenvs = nenvs - self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) - zipped_args = zip(self.work_remotes, self.remotes, env_fns) - self.ps = [ - Process(target=_worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) - for (work_remote, remote, env_fn) in zipped_args - ] - - for p in self.ps: - # if the main process crashes, we should not cause things to hang - p.daemon = True - p.start() - for remote in self.work_remotes: - remote.close() - - self.remotes[0].send(('get_spaces', None)) - observation_space, action_space = self.remotes[0].recv() - self.observation_space = observation_space - self.action_space = action_space - - def _step_async(self, actions): - """ - Tell all the environments to start taking a step - with the given actions. - Call step_wait() to get the results of the step. - You should not call this if a step_async run is - already pending. 
- """ - for remote, action in zip(self.remotes, actions): - remote.send(('step', action)) - self.waiting = True - - def _step_wait(self): - """ - Wait for the step taken with step_async(). - Returns (obs, rews, dones, infos): - - obs: an array of observations, or a tuple of - arrays of observations. - - rews: an array of rewards - - dones: an array of "episode done" booleans - - infos: a sequence of info objects - """ - results = [remote.recv() for remote in self.remotes] - self.waiting = False - obs, rews, dones, infos = zip(*results) - return np.stack(obs), np.stack(rews), np.stack(dones), infos - - def reset(self): - """ - Reset all the environments and return an array of - observations, or a tuple of observation arrays. - If step_async is still doing work, that work will - be cancelled and step_wait() should not be called - until step_async() is invoked again. - """ - for remote in self.remotes: - remote.send(('reset', None)) - return np.stack([remote.recv() for remote in self.remotes]) - - def _reset_task(self): - for remote in self.remotes: - remote.send(('reset_task', None)) - return np.stack([remote.recv() for remote in self.remotes]) - - def close(self): - if self.closed: - return - if self.waiting: - for remote in self.remotes: - remote.recv() - for remote in self.remotes: - remote.send(('close', None)) - for p in self.ps: - p.join() - self.closed = True - - def __len__(self): - return self.nenvs - - def step(self, actions): - self._step_async(actions) - return self._step_wait() - - -class Monitor(gym.Wrapper): - - def __init__(self, env): - super(Monitor, self).__init__(env) - self._monitor_rewards = None - - def reset(self, **kwargs): - self._monitor_rewards = [] - return self.env.reset(**kwargs) - - def step(self, action): - o_, r, done, info = self.env.step(action) - self._monitor_rewards.append(r) - if done: - info['episode'] = {'r': sum(self._monitor_rewards), 'l': len(self._monitor_rewards)} - return o_, r, done, info - - -class NormalizedActions(gym.ActionWrapper): - - def _action(self, action): - low = self.action_space.low - high = self.action_space.high - - action = low + (action + 1.0) * 0.5 * (high - low) - action = np.clip(action, low, high) - - return action - - def _reverse_action(self, action): - low = self.action_space.low - high = self.action_space.high - - action = 2 * (action - low) / (high - low) - 1 - action = np.clip(action, low, high) - - return action - - -def unit_test(): - env_id = 'CartPole-v0' - unwrapped_env = gym.make(env_id) - wrapped_env = build_env(env_id, False) - o = wrapped_env.reset() - print('Reset {} observation shape {}'.format(env_id, o.shape)) - done = False - while not done: - a = unwrapped_env.action_space.sample() - o_, r, done, info = wrapped_env.step(a) - print('Take action {} get reward {} info {}'.format(a, r, info)) - - env_id = 'PongNoFrameskip-v4' - nenv = 2 - unwrapped_env = gym.make(env_id) - wrapped_env = build_env(env_id, True, nenv=nenv) - o = wrapped_env.reset() - print('Reset {} observation shape {}'.format(env_id, o.shape)) - for _ in range(1000): - a = [unwrapped_env.action_space.sample() for _ in range(nenv)] - a = np.asarray(a, 'int64') - o_, r, done, info = wrapped_env.step(a) - print('Take action {} get reward {} info {}'.format(a, r, info)) - - -if __name__ == '__main__': - unit_test() diff --git a/examples/spatial_transformer_network/README.md b/examples/spatial_transformer_network/README.md deleted file mode 100644 index b9daece2b..000000000 --- a/examples/spatial_transformer_network/README.md +++ /dev/null @@ 
-1,44 +0,0 @@
-# Spatial Transformer Networks
-
-[Spatial Transformer Networks](https://arxiv.org/abs/1506.02025) (STN) is a dynamic mechanism that produces transformations of input images (or feature maps), including scaling, cropping, and rotation, as well as non-rigid deformations. This enables the network not only to select the regions of an image that are most relevant (attention), but also to transform those regions to simplify recognition in the following layers.
-
-A video of different transformations: [click me](https://drive.google.com/file/d/0B1nQa_sA3W2iN3RQLXVFRkNXN0k/view).
-
-In this repository, we implement an STN for [2D Affine Transformation](https://en.wikipedia.org/wiki/Affine_transformation) on the MNIST dataset. We generate 40x40 images from the original MNIST dataset and distort them by random rotation, shifting, shearing and zooming in/out. Trained on the classification task alone, the STN learns to automatically apply transformations to the distorted images.
-
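For reference, the localisation network predicts the six parameters of a 2x3 affine matrix θ that maps output grid coordinates to input coordinates; the sampler then interpolates the input image at those points. A NumPy sketch of just the coordinate mapping (toy parameters, not the tutorial code):

```python
# 2D affine coordinate mapping used by the STN sampler (toy example).
import numpy as np

# theta encodes rotation by angle a, uniform scale s, and translation (tx, ty)
a, s, tx, ty = np.deg2rad(15), 1.1, 0.2, -0.1
theta = np.array([[s * np.cos(a), -s * np.sin(a), tx],
                  [s * np.sin(a),  s * np.cos(a), ty]])

# normalized output grid in [-1, 1], as in the STN paper
ys, xs = np.meshgrid(np.linspace(-1, 1, 40), np.linspace(-1, 1, 40), indexing='ij')
grid = np.stack([xs.ravel(), ys.ravel(), np.ones(40 * 40)])  # homogeneous coords

src = theta @ grid   # (2, 1600) input coordinates to bilinearly sample from
print(src.shape)
```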
-[Fig 1: Transformation]
-
-[Fig 2: Network]
-
-[Fig 3: Formula]
-
-## Result
-
-After the classification task, the STN is able to transform the distorted image from Fig 4 back to Fig 5.
-
-[Fig 4: Input]
-
-[Fig 5: Output]
-
- diff --git a/examples/spatial_transformer_network/tutorial_spatial_transformer_network_dynamic.py b/examples/spatial_transformer_network/tutorial_spatial_transformer_network_dynamic.py deleted file mode 100644 index 40695339f..000000000 --- a/examples/spatial_transformer_network/tutorial_spatial_transformer_network_dynamic.py +++ /dev/null @@ -1,167 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf8 -*- -import time - -import numpy as np -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer.layers import * -from tensorlayer.models import Model - -##================== PREPARE DATA ============================================## -X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) - - -def pad_distort_im_fn(x): - """ Zero pads an image to 40x40, and distort it. - - Examples - --------- - x = pad_distort_im_fn(X_train[0]) - print(x, x.shape, x.max()) - tl.vis.save_image(x, '_xd.png') - tl.vis.save_image(X_train[0], '_x.png') - """ - b = np.zeros((40, 40, 1), dtype=np.float32) - o = int((40 - 28) / 2) - b[o:o + 28, o:o + 28] = x - x = b - x = tl.prepro.rotation(x, rg=30, is_random=True, fill_mode='constant') - x = tl.prepro.shear(x, 0.05, is_random=True, fill_mode='constant') - x = tl.prepro.shift(x, wrg=0.25, hrg=0.25, is_random=True, fill_mode='constant') - x = tl.prepro.zoom(x, zoom_range=(0.95, 1.05)) - return x - - -def pad_distort_ims_fn(X): - """ Zero pads images to 40x40, and distort them. """ - X_40 = [] - for X_a, _ in tl.iterate.minibatches(X, X, 50, shuffle=False): - X_40.extend(tl.prepro.threading_data(X_a, fn=pad_distort_im_fn)) - X_40 = np.asarray(X_40) - return X_40 - - -# create dataset with size of 40x40 with distortion -X_train_40 = pad_distort_ims_fn(X_train) -X_val_40 = pad_distort_ims_fn(X_val) -X_test_40 = pad_distort_ims_fn(X_test) - -tl.vis.save_images(X_test[0:32], [4, 8], '_imgs_original.png') -tl.vis.save_images(X_test_40[0:32], [4, 8], '_imgs_distorted.png') - - -##================== DEFINE MODEL ============================================## -class Net(Model): - - def __init__(self): - super(Net, self).__init__() - - ## 1. Localisation network - # use MLP as the localisation net - self.flatten1 = Flatten() - self.dense1 = Dense(n_units=20, in_channels=1600, act=tf.nn.tanh) - self.dropout1 = Dropout(keep=0.8) - # you can also use CNN instead for MLP as the localisation net - - ## 2. Spatial transformer module (sampler) - self.stn = SpatialTransformer2dAffine(out_size=(40, 40), in_channels=20) - - ## 3. 
Classifier - self.conv1 = Conv2d(16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', in_channels=1) - self.conv2 = Conv2d(16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', in_channels=16) - self.flatten2 = Flatten() - self.dense2 = Dense(n_units=1024, in_channels=1600, act=tf.nn.relu) - self.dense3 = Dense(n_units=10, in_channels=1024, act=tf.identity) - - def forward(self, inputs): - theta_input = self.dropout1(self.dense1(self.flatten1(inputs))) - V = self.stn((theta_input, inputs)) - _logits = self.dense3(self.dense2(self.flatten2(self.conv2(self.conv1(V))))) - return _logits, V - - -net = Net() - -##================== DEFINE TRAIN OPS ========================================## -n_epoch = 100 -learning_rate = 0.0001 -print_freq = 10 -batch_size = 64 -train_weights = net.trainable_weights -optimizer = tf.optimizers.Adam(lr=learning_rate) - -##================== TRAINING ================================================## -print("Training ...") -for epoch in range(n_epoch): - start_time = time.time() - - net.train() # enable dropout - - for X_train_a, y_train_a in tl.iterate.minibatches(X_train_40, y_train, batch_size, shuffle=True): - # input_dim must be of length 4 - X_train_a = tf.expand_dims(X_train_a, 3) - - with tf.GradientTape() as tape: - ## compute outputs - _logits, _ = net(X_train_a) # alternatively, you can use MLP(x, is_train=True) and remove MLP.train() - ## compute loss and update model - _loss = tl.cost.cross_entropy(_logits, y_train_a, name='train_loss') - - grad = tape.gradient(_loss, train_weights) - optimizer.apply_gradients(zip(grad, train_weights)) - - ## use training and evaluation sets to evaluate the model every print_freq epoch - if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - - net.eval() # disable dropout - - print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time)) - - train_loss, train_acc, n_iter = 0, 0, 0 - for X_train_a, y_train_a in tl.iterate.minibatches(X_train_40, y_train, batch_size, shuffle=False): - # input_dim must be of length 4 - X_train_a = tf.expand_dims(X_train_a, 3) - - _logits, _ = net(X_train_a) # alternatively, you can use MLP(x, is_train=False) and remove MLP.eval() - train_loss += tl.cost.cross_entropy(_logits, y_train_a, name='eval_train_loss') - train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_train_a)) - n_iter += 1 - print(" train loss: %f" % (train_loss / n_iter)) - print(" train acc: %f" % (train_acc / n_iter)) - - val_loss, val_acc, n_iter = 0, 0, 0 - for X_val_a, y_val_a in tl.iterate.minibatches(X_val_40, y_val, batch_size, shuffle=False): - # input_dim must be of length 4 - X_val_a = tf.expand_dims(X_val_a, 3) - - _logits, _ = net(X_val_a) # is_train=False, disable dropout - val_loss += tl.cost.cross_entropy(_logits, y_val_a, name='eval_loss') - val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_val_a)) - n_iter += 1 - print(" val loss: %f" % (val_loss / n_iter)) - print(" val acc: %f" % (val_acc / n_iter)) - - print('save images') - _, trans_imgs = net(tf.expand_dims(X_test_40[0:64], 3)) - trans_imgs = trans_imgs.numpy() - tl.vis.save_images(trans_imgs[0:32], [4, 8], '_imgs_distorted_after_stn_%s.png' % epoch) - -##================== EVALUATION ==============================================## -print('Evaluation') - -net.eval() - -test_loss, test_acc, n_iter = 0, 0, 0 -for X_test_a, y_test_a in tl.iterate.minibatches(X_test_40, y_test, batch_size, shuffle=False): - # input_dim must be of length 4 - X_test_a = tf.expand_dims(X_test_a, 3) - - _logits, _ = net(X_test_a) - test_loss += 
tl.cost.cross_entropy(_logits, y_test_a, name='test_loss') - test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_test_a)) - n_iter += 1 -print(" test loss: %f" % (test_loss / n_iter)) -print(" test acc: %f" % (test_acc / n_iter)) diff --git a/examples/spatial_transformer_network/tutorial_spatial_transformer_network_static.py b/examples/spatial_transformer_network/tutorial_spatial_transformer_network_static.py deleted file mode 100644 index 450284d91..000000000 --- a/examples/spatial_transformer_network/tutorial_spatial_transformer_network_static.py +++ /dev/null @@ -1,164 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf8 -*- -import time - -import numpy as np -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer.layers import * -from tensorlayer.models import Model - -##================== PREPARE DATA ============================================## -X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) - - -def pad_distort_im_fn(x): - """ Zero pads an image to 40x40, and distort it. - - Examples - --------- - x = pad_distort_im_fn(X_train[0]) - print(x, x.shape, x.max()) - tl.vis.save_image(x, '_xd.png') - tl.vis.save_image(X_train[0], '_x.png') - """ - b = np.zeros((40, 40, 1), dtype=np.float32) - o = int((40 - 28) / 2) - b[o:o + 28, o:o + 28] = x - x = b - x = tl.prepro.rotation(x, rg=30, is_random=True, fill_mode='constant') - x = tl.prepro.shear(x, 0.05, is_random=True, fill_mode='constant') - x = tl.prepro.shift(x, wrg=0.25, hrg=0.25, is_random=True, fill_mode='constant') - x = tl.prepro.zoom(x, zoom_range=(0.95, 1.05)) - return x - - -def pad_distort_ims_fn(X): - """ Zero pads images to 40x40, and distort them. """ - X_40 = [] - for X_a, _ in tl.iterate.minibatches(X, X, 50, shuffle=False): - X_40.extend(tl.prepro.threading_data(X_a, fn=pad_distort_im_fn)) - X_40 = np.asarray(X_40) - return X_40 - - -# create dataset with size of 40x40 with distortion -X_train_40 = pad_distort_ims_fn(X_train) -X_val_40 = pad_distort_ims_fn(X_val) -X_test_40 = pad_distort_ims_fn(X_test) - -tl.vis.save_images(X_test[0:32], [4, 8], '_imgs_original.png') -tl.vis.save_images(X_test_40[0:32], [4, 8], '_imgs_distorted.png') - - -##================== DEFINE MODEL ============================================## -def get_model(inputs_shape): - ni = Input(inputs_shape) - - ## 1. Localisation network - # use MLP as the localisation net - nn = Flatten()(ni) - nn = Dense(n_units=20, act=tf.nn.tanh)(nn) - nn = Dropout(keep=0.8)(nn) - # you can also use CNN instead for MLP as the localisation net - - ## 2. Spatial transformer module (sampler) - stn = SpatialTransformer2dAffine(out_size=(40, 40), in_channels=20) - nn = stn((nn, ni)) - s = nn - - ## 3. 
Classifier - nn = Conv2d(16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME')(nn) - nn = Conv2d(16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME')(nn) - nn = Flatten()(nn) - nn = Dense(n_units=1024, act=tf.nn.relu)(nn) - nn = Dense(n_units=10, act=tf.identity)(nn) - - M = Model(inputs=ni, outputs=[nn, s]) - return M - - -net = get_model([None, 40, 40, 1]) - -##================== DEFINE TRAIN OPS ========================================## -n_epoch = 100 -learning_rate = 0.0001 -print_freq = 10 -batch_size = 64 -train_weights = net.trainable_weights -optimizer = tf.optimizers.Adam(lr=learning_rate) - -##================== TRAINING ================================================## -print("Training ...") -for epoch in range(n_epoch): - start_time = time.time() - - net.train() # enable dropout - - for X_train_a, y_train_a in tl.iterate.minibatches(X_train_40, y_train, batch_size, shuffle=True): - # input_dim must be of length 4 - X_train_a = tf.expand_dims(X_train_a, 3) - - with tf.GradientTape() as tape: - ## compute outputs - _logits, _ = net(X_train_a) # alternatively, you can use MLP(x, is_train=True) and remove MLP.train() - ## compute loss and update model - _loss = tl.cost.cross_entropy(_logits, y_train_a, name='train_loss') - - grad = tape.gradient(_loss, train_weights) - optimizer.apply_gradients(zip(grad, train_weights)) - - ## use training and evaluation sets to evaluate the model every print_freq epoch - if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - - net.eval() # disable dropout - - print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time)) - - train_loss, train_acc, n_iter = 0, 0, 0 - for X_train_a, y_train_a in tl.iterate.minibatches(X_train_40, y_train, batch_size, shuffle=False): - # input_dim must be of length 4 - X_train_a = tf.expand_dims(X_train_a, 3) - - _logits, _ = net(X_train_a) # alternatively, you can use MLP(x, is_train=False) and remove MLP.eval() - train_loss += tl.cost.cross_entropy(_logits, y_train_a, name='eval_train_loss') - train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_train_a)) - n_iter += 1 - print(" train loss: %f" % (train_loss / n_iter)) - print(" train acc: %f" % (train_acc / n_iter)) - - val_loss, val_acc, n_iter = 0, 0, 0 - for X_val_a, y_val_a in tl.iterate.minibatches(X_val_40, y_val, batch_size, shuffle=False): - # input_dim must be of length 4 - X_val_a = tf.expand_dims(X_val_a, 3) - - _logits, _ = net(X_val_a) # is_train=False, disable dropout - val_loss += tl.cost.cross_entropy(_logits, y_val_a, name='eval_loss') - val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_val_a)) - n_iter += 1 - print(" val loss: %f" % (val_loss / n_iter)) - print(" val acc: %f" % (val_acc / n_iter)) - - print('save images') - _, trans_imgs = net(tf.expand_dims(X_test_40[0:64], 3)) - trans_imgs = trans_imgs.numpy() - tl.vis.save_images(trans_imgs[0:32], [4, 8], '_imgs_distorted_after_stn_%s.png' % epoch) - -##================== EVALUATION ==============================================## -print('Evaluation') - -net.eval() - -test_loss, test_acc, n_iter = 0, 0, 0 -for X_test_a, y_test_a in tl.iterate.minibatches(X_test_40, y_test, batch_size, shuffle=False): - # input_dim must be of length 4 - X_test_a = tf.expand_dims(X_test_a, 3) - - _logits, _ = net(X_test_a) - test_loss += tl.cost.cross_entropy(_logits, y_test_a, name='test_loss') - test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_test_a)) - n_iter += 1 -print(" test loss: %f" % (test_loss / n_iter)) -print(" test acc: %f" % (test_acc / n_iter)) diff --git 
diff --git a/examples/text_classification/readme.md b/examples/text_classification/readme.md
deleted file mode 100644
index 1045efc2f..000000000
--- a/examples/text_classification/readme.md
+++ /dev/null
@@ -1,29 +0,0 @@
-
-### Introduction
-
-The demos implement [FastText](http://arxiv.org/abs/1607.01759)[1] for sentence classification.
-
-Code: [tutorial_imdb_fasttext.py](tutorial_imdb_fasttext.py)
-
-FastText is a simple model for text classification with performance often close
-to state-of-the-art, and is useful as a solid baseline.
-
-There are some important differences between this implementation and what
-is described in the paper. Instead of Hogwild! SGD[2], we use the Adam optimizer
-with mini-batches. Hierarchical softmax is also not supported; if you have
-a large label space, consider the candidate sampling methods provided
-by TensorFlow[3].
-
-After 5 epochs, you should get a test accuracy of around 90.3%.
-
-### References
-
-[1] Joulin, A., Grave, E., Bojanowski, P., & Mikolov, T. (2016).
-    Bag of Tricks for Efficient Text Classification.
-
-[2] Recht, B., Re, C., Wright, S., & Niu, F. (2011).
-    Hogwild!: A Lock-Free Approach to Parallelizing Stochastic Gradient Descent.
-    In Advances in Neural Information Processing Systems 24 (pp. 693–701).
-
-[3] https://www.tensorflow.org/api_guides/python/nn#Candidate_Sampling
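The readme's pointer to candidate sampling can be made concrete. With only two IMDB labels the full softmax used by the tutorial is cheap, but for a large label space one would replace the final cross-entropy with a sampled loss. Below is a minimal sketch using `tf.nn.sampled_softmax_loss`; the sizes and variable names are invented for illustration and are not part of the deleted tutorial.

```python
import tensorflow as tf

NUM_CLASSES = 500000   # hypothetical large label space (IMDB itself has 2)
EMBED_DIM = 50
BATCH = 32
NUM_SAMPLED = 64       # negative classes drawn per batch

# output projection; sampled softmax still owns the full class matrix,
# it just evaluates only NUM_SAMPLED + 1 rows of it per example
softmax_w = tf.Variable(tf.random.normal([NUM_CLASSES, EMBED_DIM], stddev=0.05))
softmax_b = tf.Variable(tf.zeros([NUM_CLASSES]))

sentence_vecs = tf.random.normal([BATCH, EMBED_DIM])   # e.g. averaged embeddings
labels = tf.random.uniform([BATCH, 1], maxval=NUM_CLASSES, dtype=tf.int64)

loss = tf.reduce_mean(
    tf.nn.sampled_softmax_loss(
        weights=softmax_w,
        biases=softmax_b,
        labels=labels,          # shape [batch, 1]
        inputs=sentence_vecs,   # shape [batch, dim]
        num_sampled=NUM_SAMPLED,
        num_classes=NUM_CLASSES,
    )
)
```

The sampled loss is only used during training; at evaluation time one still scores against the full weight matrix with an ordinary softmax.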
diff --git a/examples/text_classification/tutorial_imdb_fasttext.py b/examples/text_classification/tutorial_imdb_fasttext.py
deleted file mode 100644
index 53b0fdce7..000000000
--- a/examples/text_classification/tutorial_imdb_fasttext.py
+++ /dev/null
@@ -1,175 +0,0 @@
-#!/usr/bin/env python
-"""
-This demo implements FastText[1] for sentence classification. It should be run
-in eager mode and can be slower than the corresponding demo in graph mode.
-
-FastText is a simple model for text classification with performance often close
-to state-of-the-art, and is useful as a solid baseline.
-
-There are some important differences between this implementation and what
-is described in the paper. Instead of Hogwild! SGD[2], we use the Adam optimizer
-with mini-batches. Hierarchical softmax is also not supported; if you have
-a large label space, consider the candidate sampling methods provided
-by TensorFlow[3].
-
-After 5 epochs, you should get a test accuracy of around 90.3%.
-
-[1] Joulin, A., Grave, E., Bojanowski, P., & Mikolov, T. (2016).
-    Bag of Tricks for Efficient Text Classification.
-    http://arxiv.org/abs/1607.01759
-
-[2] Recht, B., Re, C., Wright, S., & Niu, F. (2011).
-    Hogwild!: A Lock-Free Approach to Parallelizing Stochastic Gradient Descent.
-    In Advances in Neural Information Processing Systems 24 (pp. 693–701).
-
-[3] https://www.tensorflow.org/api_guides/python/nn#Candidate_Sampling
-
-"""
-import array
-import hashlib
-import os
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.layers import *
-from tensorlayer.models import *
-
-# Hashed n-grams with 1 < n <= N_GRAM are included as features
-# in addition to unigrams.
-N_GRAM = 2
-
-# Size of the vocabulary; less frequent words are treated as "unknown"
-VOCAB_SIZE = 100000
-
-# Number of buckets used for hashing n-grams
-N_BUCKETS = 1000000
-
-# Size of the embedding vectors
-EMBEDDING_SIZE = 50
-
-# Number of epochs for which the model is trained
-N_EPOCH = 5
-
-# Number of steps between progress prints
-N_STEPS_TO_PRINT = 100
-
-# Size of training mini-batches
-BATCH_SIZE = 32
-
-# Learning rate
-LEARNING_RATE = 0.01
-
-# Path to which the trained model is saved
-MODEL_FILE_PATH = 'model_dynamic.hdf5'
-
-
-class FastTextModel(Model):
-    """Model structure and forward pass of FastText."""
-
-    def __init__(self, vocab_size, embedding_size, n_labels, name='fasttext'):
-        super(FastTextModel, self).__init__(name=name)
-
-        self.avg_embed = AverageEmbedding(vocab_size, embedding_size)
-        self.dense1 = Dense(n_units=10, in_channels=embedding_size)
-        self.dense2 = Dense(n_units=n_labels, in_channels=10)
-
-    def forward(self, x):
-        z = self.avg_embed(x)
-        z = self.dense1(z)
-        z = self.dense2(z)
-        return z
-
-
-def augment_with_ngrams(unigrams, unigram_vocab_size, n_buckets, n=2):
-    """Augment unigram features with hashed n-gram features."""
-
-    def get_ngrams(n):
-        return list(zip(*[unigrams[i:] for i in range(n)]))
-
-    def hash_ngram(ngram):
-        bytes_ = array.array('L', ngram).tobytes()
-        hash_ = int(hashlib.sha256(bytes_).hexdigest(), 16)
-        return unigram_vocab_size + hash_ % n_buckets
-
-    return unigrams + [hash_ngram(ngram) for i in range(2, n + 1) for ngram in get_ngrams(i)]
-
-
-def load_and_preprocess_imdb_data(n_gram=None):
-    """Load IMDb data and augment it with hashed n-gram features."""
-    tl.logging.info("Loading and preprocessing IMDB data.")
-
-    X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(nb_words=VOCAB_SIZE)
-
-    if n_gram is not None:
-        X_train = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_train])
-        X_test = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_test])
-
-    return X_train, y_train, X_test, y_test
-
-
-def train_test_and_save_model():
-    X_train, y_train, X_test, y_test = load_and_preprocess_imdb_data(N_GRAM)
-    model = FastTextModel(
-        vocab_size=VOCAB_SIZE + N_BUCKETS,
-        embedding_size=EMBEDDING_SIZE,
-        n_labels=2,
-    )
-    optimizer = tf.optimizers.Adam(learning_rate=LEARNING_RATE)
-
-    if os.path.exists(MODEL_FILE_PATH):
-        # load the pre-trained model if one exists
-        model.load_weights(MODEL_FILE_PATH)
-    else:
-        # training
-        model.train()
-
-        for epoch in range(N_EPOCH):
-            start_time = time.time()
-            print('Epoch %d/%d' % (epoch + 1, N_EPOCH))
-            train_accuracy = list()
-            for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size=BATCH_SIZE, shuffle=True):
-
-                # forward pass and loss
-                # TODO: use tf.function to speed up
-                with tf.GradientTape() as tape:
-                    y_pred = model(tl.prepro.pad_sequences(X_batch))
-                    cost = tl.cost.cross_entropy(y_pred, y_batch, name='cost')
-
-                # backward pass: compute the gradients and update the weights
-                grad = tape.gradient(cost, model.trainable_weights)
-                optimizer.apply_gradients(zip(grad, model.trainable_weights))
-
-                # compute the accuracy
-                predictions = tf.argmax(y_pred, axis=1, output_type=tf.int32)
-                are_predictions_correct = tf.equal(predictions, y_batch)
-                accuracy = tf.reduce_mean(tf.cast(are_predictions_correct, tf.float32))
-
-                train_accuracy.append(accuracy)
-                if len(train_accuracy) % N_STEPS_TO_PRINT == 0:
-                    print(
-                        "\t[%d/%d][%d]accuracy " % (epoch + 1, N_EPOCH, len(train_accuracy)),
-                        np.mean(train_accuracy[-N_STEPS_TO_PRINT:])
-                    )
-
-            print("\tSummary: time %.5fs, overall accuracy" % (time.time() - start_time), np.mean(train_accuracy))
-
-    # evaluation and testing
-    model.eval()
-
-    # forward pass over the whole test set, then compute the accuracy
-    y_pred = model(tl.prepro.pad_sequences(X_test))
-    predictions = tf.argmax(y_pred, axis=1, output_type=tf.int32)
-    are_predictions_correct = tf.equal(predictions, y_test)
-    test_accuracy = tf.reduce_mean(tf.cast(are_predictions_correct, tf.float32))
-
-    print('Test accuracy: %.5f' % test_accuracy)
-
-    # save the model
-    model.save_weights(MODEL_FILE_PATH)
-
-
-if __name__ == '__main__':
-    train_test_and_save_model()
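To make the bucketing in the deleted tutorial concrete, here is a small walk-through of `augment_with_ngrams` as defined above (the token ids are invented). Each consecutive pair of ids is SHA-256-hashed into one of `n_buckets` embedding rows reserved above the unigram vocabulary, so no explicit bigram vocabulary ever has to be stored.

```python
# token ids for a toy four-word sentence (ids invented for illustration)
unigrams = [2, 314, 25, 410]

feats = augment_with_ngrams(unigrams, unigram_vocab_size=100000, n_buckets=1000000, n=2)

# the four unigram ids pass through unchanged ...
assert feats[:4] == unigrams
# ... and each consecutive pair, e.g. (2, 314), hashes into a reserved bucket id
assert all(100000 <= f < 1100000 for f in feats[4:])
print(len(feats))  # 7 == 4 unigrams + 3 bigrams
```

Collisions between different n-grams are possible and simply accepted; that is the usual trade-off of the hashing trick, and with ten times more buckets than vocabulary entries they stay rare.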
diff --git a/examples/text_generation/README.md b/examples/text_generation/README.md
deleted file mode 100644
index e69de29bb..000000000
diff --git a/examples/text_generation/data/.DS_Store b/examples/text_generation/data/.DS_Store
deleted file mode 100644
index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000
GIT binary patch
[base85-encoded .DS_Store payload omitted; it is truncated in this copy of the patch and carries no reviewable content]
[deleted data file: the 11,519-line vocabulary of "word count" pairs under examples/text_generation/data/ (entries such as `. 440479`, `on 213612`, `of 202290`); the full listing is omitted here]
-attacking 23 -llama 23 -crisp 23 -avoid 23 -wagons 23 -cot 23 -baseballs 23 -gnar 23 -broke 23 -patties 23 -rub 23 -hygiene 23 -keeper 23 -fills 23 -shoot 23 -victory 23 -primitive 23 -heat 23 -poll 23 -notepad 23 -dressers 23 -built-in 23 -extinguisher 23 -dummy 23 -honda 23 -violin 23 -upstairs 23 -chew 23 -nets 23 -created 23 -attic 23 -cuckoo 23 -connecting 23 -duffle 23 -bikinis 23 -campus 23 -monkeys 23 -plug 23 -plus 23 -fatigues 23 -plans 23 -lantern 23 -labrador 23 -infront 23 -filing 23 -differently 23 -twisted 23 -designated 23 -casts 23 -skiies 23 -quilted 23 -venue 23 -quick 23 -wiimote 23 -cuddles 23 -components 23 -magnifying 23 -protesters 23 -bringing 22 -needed 22 -para-sailing 22 -brunette 22 -easel 22 -novelty 22 -feather 22 -coins 22 -swamp 22 -rectangle 22 -battle 22 -shares 22 -unloaded 22 -stain 22 -congratulations 22 -dug 22 -lincoln 22 -sking 22 -winning 22 -corridor 22 -occasion 22 -daffodils 22 -nail 22 -entertaining 22 -lets 22 -flashing 22 -cordless 22 -zombie 22 -motif 22 -dell 22 -melons 22 -damage 22 -walk-in 22 -act 22 -gesturing 22 -bookcases 22 -secluded 22 -cape 22 -colt 22 -article 22 -temporary 22 -youngster 22 -reacts 22 -motorcyle 22 -piercing 22 -pavilion 22 -create 22 -minivan 22 -slight 22 -knifes 22 -pacific 22 -trench 22 -guns 22 -sliver 22 -upcoming 22 -infield 22 -fifth 22 -scheme 22 -signaling 22 -eggplant 22 -remaining 22 -confused 22 -tale 22 -pizzeria 22 -10 22 -fixings 22 -bowtie 22 -remove 22 -tiling 22 -finishes 22 -flame 22 -sanctuary 22 -judge 22 -merry 22 -well-lit 22 -once 22 -statute 22 -whale 22 -spider 22 -draft 22 -aside 22 -checkerboard 22 -signpost 22 -ith 22 -patty 22 -assisting 22 -wares 22 -pause 22 -hoses 22 -brake 22 -mayo 22 -privacy 22 -signed 22 -wilted 22 -bathrooms 22 -exhaust 22 -whites 22 -hound 22 -cubes 22 -railings 22 -bridges 22 -lecture 22 -accent 22 -inspect 22 -sony 22 -interact 22 -react 22 -rocket 22 -padded 21 -binder 21 -servings 21 -speedboat 21 -barred 21 -nurses 21 -protection 21 -roundabout 21 -pastel 21 -basins 21 -tubes 21 -earphones 21 -underpass 21 -likes 21 -broad 21 -quarter 21 -kisses 21 -calmly 21 -tins 21 -trimming 21 -sample 21 -description 21 -pesto 21 -snow-capped 21 -appetizing 21 -mayonnaise 21 -straddling 21 -joke 21 -plumbing 21 -days 21 -perching 21 -uncovered 21 -bales 21 -turf 21 -gull 21 -guacamole 21 -vacation 21 -tubs 21 -alcove 21 -wrench 21 -disorganized 21 -rowboats 21 -retrieving 21 -hope 21 -glue 21 -buoy 21 -chopper 21 -evil 21 -ex 21 -paints 21 -disgusting 21 -texas 21 -pleasant 21 -latest 21 -ma 21 -gymnasium 21 -thermometer 21 -options 21 -dripping 21 -manicured 21 -futuristic 21 -rapid 21 -trek 21 -spilling 21 -artistically 21 -knobs 21 -darth 21 -scape 21 -packet 21 -rubble 21 -ink 21 -fondant 21 -rubs 21 -conditions 21 -kiwis 21 -walkers 21 -maneuvers 21 -sweaters 21 -well-dressed 21 -colonial 21 -resembles 21 -doctors 21 -gap 21 -stories 21 -add 21 -humans 21 -confined 21 -multitude 21 -spires 21 -barb 21 -sip 21 -gutter 21 -saw 21 -montage 21 -badly 21 -backsplash 21 -hauls 21 -kayaking 21 -seasonings 21 -effect 21 -updated 21 -convenience 21 -reddish 21 -barking 21 -lagoon 21 -captures 21 -lavish 21 -sons 21 -reflect 21 -crouch 20 -humming 20 -eighteen 20 -smashed 20 -intent 20 -receipt 20 -harnessed 20 -sock 20 -chat 20 -strainer 20 -audio 20 -hipsters 20 -emblem 20 -peek 20 -tangled 20 -squatted 20 -apartments 20 -ass 20 -blossom 20 -timer 20 -gallery 20 -officials 20 -competitors 20 -appetizer 20 -ajar 20 -surfboarder 20 -cutout 20 -expensive 20 -curl 20 
-snowmobile 20 -nike 20 -peaks 20 -surfaces 20 -herder 20 -pretzels 20 -decent 20 -blossoms 20 -reclines 20 -panorama 20 -t-shirts 20 -records 20 -forklift 20 -filth 20 -olympics 20 -bale 20 -manner 20 -lemonade 20 -sheer 20 -dawn 20 -walnuts 20 -ferns 20 -stations 20 -scout 20 -titled 20 -tattered 20 -dunes 20 -sloppy 20 -lowers 20 -begging 20 -penguins 20 -offspring 20 -snowstorm 20 -airshow 20 -turban 20 -intersecting 20 -rafts 20 -sloping 20 -grips 20 -losing 20 -fastened 20 -vader 20 -chocolates 20 -korean 20 -inn 20 -aid 20 -illustration 20 -untidy 20 -firetrucks 20 -yachts 20 -adds 20 -loaves 20 -straightening 20 -decorates 20 -washes 20 -wipes 20 -orders 20 -parka 20 -mounds 20 -guest 20 -teammate 20 -apparel 20 -sucking 20 -feel 20 -spotless 20 -ate 20 -jumper 20 -boar 20 -adjust 20 -longboard 20 -scary 20 -speak 20 -handlers 20 -grated 20 -alike 20 -newer 20 -campaign 20 -aeroplane 20 -quietly 20 -lions 20 -rackett 20 -dollhouse 20 -brother 20 -scarves 20 -tinted 20 -ducklings 20 -clowns 20 -samples 20 -unpaved 20 -closer 20 -grasping 19 -internet 19 -modest 19 -lipstick 19 -access 19 -goblet 19 -sightseeing 19 -magnetic 19 -lavatory 19 -handful 19 -climate 19 -rendering 19 -actively 19 -miss 19 -pace 19 -sniff 19 -glare 19 -crops 19 -flops 19 -lick 19 -lava 19 -wrecked 19 -micro 19 -cellar 19 -excitedly 19 -yoga 19 -depart 19 -khaki 19 -portions 19 -period 19 -freez 19 -toes 19 -shorn 19 -stork 19 -overstuffed 19 -fold 19 -grounds 19 -floss 19 -sheered 19 -lazing 19 -oats 19 -score 19 -fox 19 -width 19 -cantaloupe 19 -shepard 19 -summit 19 -bunched 19 -elegantly 19 -thee 19 -carpeting 19 -grape 19 -hump 19 -breed 19 -embrace 19 -heated 19 -fo 19 -sweeping 19 -overweight 19 -grins 19 -tokyo 19 -selecting 19 -sands 19 -county 19 -squirting 19 -halved 19 -supply 19 -bridal 19 -per 19 -organ 19 -protecting 19 -verdant 19 -nun 19 -contemplating 19 -performer 19 -implements 19 -attendants 19 -uncut 19 -upside-down 19 -'d 19 -decals 19 -orchids 19 -slatted 19 -bristles 19 -australia 19 -uk 19 -pepperonis 19 -fits 19 -luggages 19 -stark 19 -scooping 19 -bring 19 -ina 19 -outer 19 -cranberry 19 -maneuvering 19 -mad 19 -shield 19 -frolicking 19 -tuck 19 -energy 19 -unhappy 19 -merchandise 19 -ray 19 -insect 19 -cameraman 19 -ruins 19 -satellite 19 -tractors 19 -parchment 19 -capital 19 -grafitti 19 -feast 19 -pedal 19 -spectator 19 -blouse 19 -herb 19 -geisha 19 -brings 19 -hog 19 -silhouettes 19 -pages 19 -mounting 19 -touring 19 -glaring 19 -target 19 -rested 19 -active 19 -went 19 -chess 19 -smokes 19 -protected 19 -mop 19 -doggy 19 -clapping 19 -bathed 19 -ewe 19 -clips 19 -brushy 19 -pouch 19 -hanged 19 -rag 19 -coastal 19 -iwth 19 -choose 19 -destinations 19 -sequence 19 -fir 18 -pretzel 18 -lasso 18 -gadget 18 -lufthansa 18 -glassed 18 -grave 18 -yamaha 18 -shared 18 -flamingo 18 -appointed 18 -separating 18 -armor 18 -defaced 18 -mario 18 -bot 18 -showcasing 18 -cock 18 -pandas 18 -examine 18 -videogame 18 -sack 18 -mercedes 18 -tuxedos 18 -monorail 18 -chests 18 -sponge 18 -cobbled 18 -fourth 18 -solo 18 -higher 18 -showcase 18 -birth 18 -floppy 18 -anything 18 -sugared 18 -winnie 18 -angeles 18 -textured 18 -fudge 18 -greek 18 -jugs 18 -antenna 18 -chicks 18 -rifle 18 -cgi 18 -ripened 18 -inverted 18 -bee 18 -flickr 18 -straws 18 -never 18 -panini 18 -buggies 18 -zones 18 -gps 18 -paid 18 -graveyard 18 -armed 18 -ladle 18 -chairlift 18 -alot 18 -explaining 18 -coloring 18 -firefighter 18 -bud 18 -rhinoceros 18 -orderly 18 -backward 18 -witting 18 -proud 18 -test 18 -due 18 
-wineglass 18 -nasa 18 -alter 18 -unison 18 -growth 18 -wintery 18 -priest 18 -harry 18 -gorgeous 18 -knight 18 -writes 18 -decal 18 -streams 18 -350 18 -government 18 -leashed 18 -ghost 18 -kale 18 -vegtables 18 -suited 18 -species 18 -menus 18 -racetrack 18 -describing 18 -specialty 18 -aligned 18 -sees 18 -bruised 18 -pretends 18 -shredding 18 -humorous 18 -accessible 18 -proper 18 -muzzle 18 -jalapenos 18 -teenaged 18 -puzzle 18 -engineer 18 -ale 18 -table.. 18 -files 18 -parasailers 18 -attentively 18 -seaplane 18 -accordion 18 -# 18 -sleeves 18 -diesel 18 -sugary 18 -grinning 18 -darkness 18 -raspberry 18 -danger 18 -florida 18 -crushed 18 -captain 18 -rollerblading 18 -unload 18 -movement 18 -build 18 -alaska 18 -cookware 18 -stock 18 -watery 18 -junction 18 -straps 18 -inviting 18 -socialize 18 -hula 18 -boiling 18 -song 18 -jetway 18 -font 18 -seperate 18 -shovel 18 -tar 18 -ethnic 18 -unpacked 18 -rotting 18 -concentrates 17 -sealed 17 -dyed 17 -blended 17 -curving 17 -dumping 17 -canopies 17 -foodstuffs 17 -disposable 17 -drags 17 -u 17 -mule 17 -heights 17 -espresso 17 -tupperware 17 -emitting 17 -flashlight 17 -hatch 17 -entrees 17 -resembling 17 -wildebeests 17 -cube 17 -perhaps 17 -preserve 17 -claws 17 -forefront 17 -bucking 17 -contained 17 -coconuts 17 -bodies 17 -session 17 -occupy 17 -smooth 17 -dj 17 -dr 17 -naval 17 -v 17 -trophies 17 -rugged 17 -dune 17 -performers 17 -juicy 17 -gloved 17 -enthusiastically 17 -russian 17 -gilded 17 -ups 17 -soaking 17 -penned 17 -sundae 17 -skinned 17 -calendar 17 -crock 17 -bookstore 17 -videos 17 -dew 17 -duct 17 -girafee 17 -hiker 17 -mm 17 -god 17 -dropped 17 -sectioned 17 -units 17 -temperature 17 -instruction 17 -bounces 17 -usb 17 -earrings 17 -shelve 17 -camo 17 -bodysuit 17 -crammed 17 -mohawk 17 -slim 17 -lunchbox 17 -wolf 17 -hardware 17 -vein 17 -muscular 17 -selections 17 -los 17 -fronted 17 -telephones 17 -mid-jump 17 -soon 17 -chimney 17 -loop 17 -based 17 -battered 17 -18 17 -switching 17 -aa 17 -bulb 17 -tot 17 -toe 17 -ankle 17 -pyramid 17 -charge 17 -dries 17 -skillfully 17 -minor 17 -spool 17 -hydrogen 17 -flea 17 -bills 17 -atm 17 -longhorn 17 -release 17 -eastern 17 -marbled 17 -punk 17 -siding 17 -accented 17 -clump 17 -keychain 17 -shorter 17 -involved 17 -lucky 17 -lotion 17 -hike 17 -observed 17 -artisan 17 -mist 17 -efficiency 17 -daily 17 -bartender 17 -dig 17 -dial 17 -chipped 17 -muscles 17 -gotten 17 -please 17 -google 17 -bead 17 -host 17 -condiment 17 -doubledecker 17 -mainly 17 -roosters 17 -season 17 -gauges 17 -orioles 17 -choices 17 -windmills 16 -rich 16 -wheelbarrow 16 -k 16 -shoots 16 -address 16 -retrieve 16 -balconies 16 -grounded 16 -spins 16 -chilli 16 -enforcement 16 -ponytail 16 -hangings 16 -hairless 16 -coverings 16 -teaches 16 -slacks 16 -snout 16 -lowering 16 -747 16 -fairy 16 -dividers 16 -descends 16 -knights 16 -lanterns 16 -cant 16 -zombies 16 -batman 16 -anticipation 16 -attaching 16 -swampy 16 -dachshund 16 -upholstered 16 -screened 16 -erected 16 -closing 16 -swimmer 16 -spiky 16 -dolphin 16 -dumplings 16 -impressive 16 -cash 16 -milkshake 16 -eclectic 16 -comfy 16 -storefronts 16 -perpendicular 16 -landscaped 16 -typewriter 16 -our 16 -skying 16 -stucco 16 -rare 16 -polished 16 -shopped 16 -causing 16 -citizens 16 -runners 16 -dodgers 16 -future 16 -navigates 16 -humongous 16 -stables 16 -mid-swing 16 -exhibition 16 -spa 16 -postcard 16 -wedged 16 -marshy 16 -urns 16 -butting 16 -greasy 16 -folder 16 -ii 16 -marinara 16 -hikers 16 -skii 16 -terrace 16 -mothers 16 -penn 16 
-nunchuck 16 -colander 16 -paths 16 -pea 16 -feta 16 -la 16 -planks 16 -respected 16 -virtual 16 -confetti 16 -disney 16 -stood 16 -bees 16 -cresting 16 -easily 16 -surgical 16 -armoire 16 -health 16 -tends 16 -conveyer 16 -continental 16 -neckties 16 -opponents 16 -anticipating 16 -overhang 16 -returned 16 -joined 16 -register 16 -d 16 -un 16 -drift 16 -bunting 16 -property 16 -mousepad 16 -cay 16 -1950 16 -mcdonalds 16 -exercising 16 -delivered 16 -snapshot 16 -peddling 16 -lilies 16 -outskirts 16 -tacos 16 -shift 16 -hotels 16 -anyone 16 -turnips 16 -awkward 16 -silo 16 -mate 16 -mit 16 -shadowy 16 -steers 16 -wineglasses 16 -proceed 16 -aims 16 -wheeling 16 -holidays 16 -goodies 16 -indicator 16 -performed 16 -plowed 16 -brought 16 -prison 16 -creative 16 -congested 16 -packets 16 -badge 16 -loving 16 -wanting 16 -cushioned 16 -giants 16 -origami 16 -vultures 16 -paperback 16 -volkswagon 16 -mingle 16 -dancers 16 -thinking 16 -dealership 16 -gondola 16 -trained 16 -imac 16 -gnome 16 -drab 16 -tangerine 16 -quality 16 -nailed 16 -ever 16 -undergoing 16 -further 16 -often 16 -restaraunt 16 -limo 16 -hazard 16 -waitress 16 -chaise 16 -pausing 16 -thought 16 -teddybear 16 -curry 16 -tortillas 16 -frothy 16 -broccolli 16 -huts 16 -snuggles 15 -bulding 15 -conditioner 15 -crows 15 -seriously 15 -fashionable 15 -dirtbike 15 -derby 15 -nursery 15 -cuddle 15 -flyers 15 -john 15 -confusing 15 -scallops 15 -embroidered 15 -youngsters 15 -mountaintop 15 -tend 15 -necessary 15 -fountains 15 -territory 15 -estate 15 -dramatic 15 -griddle 15 -messed 15 -squid 15 -partner 15 -50 15 -toothpicks 15 -passage 15 -prep 15 -coolers 15 -polish 15 -affectionate 15 -allowing 15 -alien 15 -meatloaf 15 -automated 15 -garment 15 -skirts 15 -ollie 15 -repaired 15 -pics 15 -omelette 15 -sex 15 -towns 15 -relaxed 15 -sister 15 -squadron 15 -snoozing 15 -props 15 -coo 15 -baguette 15 -means 15 -quote 15 -rue 15 -chunk 15 -entryway 15 -crumbling 15 -novel 15 -dusting 15 -yelling 15 -shit 15 -pail 15 -totally 15 -generated 15 -flat-screen 15 -disrepair 15 -icecream 15 -spout 15 -handwritten 15 -mosquito 15 -bicycling 15 -arrived 15 -wakeboarding 15 -musician 15 -barricades 15 -pretend 15 -buttered 15 -handsome 15 -laces 15 -sorting 15 -exciting 15 -collecting 15 -skulls 15 -acrobatic 15 -enters 15 -peacocks 15 -bound 15 -strangely 15 -ump 15 -checker 15 -colgate 15 -breeze 15 -thatched 15 -banister 15 -heel 15 -picker 15 -illuminate 15 -wreck 15 -polaroid 15 -garland 15 -spare 15 -intricately 15 -simply 15 -lesson 15 -fire-hydrant 15 -inclosure 15 -sedan 15 -ads 15 -motorola 15 -splashes 15 -reveal 15 -his/her 15 -wanders 15 -waterside 15 -momma 15 -evergreens 15 -unpeeled 15 -bystanders 15 -hers 15 -blending 15 -ana 15 -bathrobe 15 -tours 15 -hunting 15 -hoping 15 -appropriate 15 -consumed 15 -haul 15 -comic 15 -tugboat 15 -congregating 15 -patchwork 15 -trinkets 15 -ceremonial 15 -yaks 15 -autographs 15 -toyota 15 -trolleys 15 -carvings 15 -stemmed 15 -dodge 15 -steeples 15 -theres 15 -geometric 15 -unripened 15 -connect 15 -puppet 15 -chased 15 -outfielder 15 -proximity 15 -childrens 15 -wildly 15 -pops 15 -fabrics 15 -comical 15 -rowers 15 -finely 15 -australian 15 -smeared 15 -attended 15 -dye 15 -skewers 15 -wtih 15 -grits 15 -knob 15 -pate 15 -locker 14 -over-sized 14 -fro 14 -tundra 14 -listens 14 -suspension 14 -crossroads 14 -twisting 14 -roots 14 -calzone 14 -outfitted 14 -interviewed 14 -ave. 
14 -presses 14 -admire 14 -nears 14 -thanksgiving 14 -choosing 14 -plot 14 -cappuccino 14 -draws 14 -smoggy 14 -born 14 -corona 14 -elvis 14 -surboard 14 -systems 14 -amounts 14 -walnut 14 -ankles 14 -slicer 14 -vaulted 14 -shocked 14 -wonder 14 -demonic 14 -paraphernalia 14 -serene 14 -feasting 14 -blindfolded 14 -popping 14 -detail 14 -stirred 14 -rental 14 -filter 14 -rodent 14 -amateur 14 -vigorously 14 -gushing 14 -weed 14 -delicate 14 -buddha 14 -crumpled 14 -collect 14 -contrasting 14 -satchel 14 -diffrent 14 -bomb 14 -articulated 14 -sunbathing 14 -stilts 14 -snowshoes 14 -torso 14 -condition 14 -science 14 -arcade 14 -actors 14 -engage 14 -oblong 14 -spelled 14 -necessities 14 -luncheon 14 -dc 14 -fires 14 -deciding 14 -cowgirl 14 -readying 14 -user 14 -afro 14 -parasailer 14 -enthusiasts 14 -surprise 14 -visitor 14 -springs 14 -anchor 14 -mismatched 14 -tux 14 -offered 14 -gross 14 -leftovers 14 -search 14 -campground 14 -pared 14 -overly 14 -park-like 14 -escape 14 -mates 14 -wilting 14 -parliament 14 -shavings 14 -extension 14 -overhand 14 -greyhound 14 -contrails 14 -shading 14 -gently 14 -clipped 14 -compute 14 -veil 14 -alligator 14 -navigate 14 -belongs 14 -nuzzles 14 -peripherals 14 -rules 14 -interest 14 -loom 14 -overloaded 14 -freeze 14 -tandem 14 -potential 14 -twp 14 -25 14 -trooper 14 -unidentifiable 14 -scruffy 14 -replaced 14 -graffitti 14 -pontoon 14 -chandeliers 14 -flapping 14 -operates 14 -cleared 14 -duty 14 -join 14 -crispy 14 -kimono 14 -aloft 14 -dusted 14 -combs 14 -ducky 14 -unbuttoned 14 -sheepdog 14 -forming 14 -smell 14 -films 14 -dinette 14 -tote 14 -stony 14 -pendulum 14 -backing 14 -popcorn 14 -falcon 14 -parakeet 14 -yellowish 14 -halter 14 -transparent 14 -angrily 14 -unusually 14 -envelope 14 -write 14 -thai 14 -peopel 14 -pamphlets 14 -cleans 14 -content 14 -sling 14 -staying 14 -6th 14 -mary 14 -shakers 14 -affection 14 -thames 14 -monks 14 -solid 14 -italy 14 -blooms 14 -domestic 14 -custard 14 -merchant 14 -parasails 14 -streak 14 -comfort 14 -20 14 -toiled 14 -allows 14 -propping 14 -turbulent 14 -empire 14 -comically 13 -screaming 13 -stillness 13 -choice 13 -locks 13 -mango 13 -bi-plane 13 -para-sail 13 -respective 13 -bulletin 13 -connection 13 -frisbie 13 -wrinkled 13 -harnesses 13 -amish 13 -glows 13 -rods 13 -become 13 -trainers 13 -4-way 13 -bangs 13 -tiara 13 -professionals 13 -gaze 13 -poop 13 -inspired 13 -producing 13 -wallpapered 13 -instructing 13 -charming 13 -links 13 -cause 13 -proceeds 13 -rafting 13 -heeled 13 -blinders 13 -observation 13 -pallets 13 -dear 13 -happening 13 -strolls 13 -pedestals 13 -miles 13 -buidling 13 -outs 13 -ramen 13 -plastered 13 -tacks 13 -rafters 13 -baltimore 13 -weaving 13 -braces 13 -meaty 13 -squinting 13 -dials 13 -fern 13 -grasps 13 -bulbs 13 -deflated 13 -parakeets 13 -adventure 13 -encased 13 -roofs 13 -engraved 13 -wrote 13 -kangaroo 13 -attend 13 -articles 13 -g 13 -dreadlocks 13 -identification 13 -crude 13 -pike 13 -prize 13 -cheetah 13 -listen 13 -traversing 13 -bovine 13 -outward 13 -twins 13 -crafted 13 -delivers 13 -piers 13 -vessels 13 -forth 13 -de 13 -graduates 13 -p 13 -supporting 13 -pedals 13 -fling 13 -hover 13 -blend 13 -treed 13 -withe 13 -canisters 13 -thermos 13 -campers 13 -necklaces 13 -providing 13 -nutritious 13 -greenish 13 -stains 13 -cents 13 -struggles 13 -salami 13 -supports 13 -sparkler 13 -cork 13 -particular 13 -ans 13 -os 13 -caucasian 13 -waxing 13 -crusted 13 -mulch 13 -leisurely 13 -re 13 -munches 13 -tasks 13 -awesome 13 -average 13 -cilantro 13 -vacuum 
13 -mast 13 -birdcage 13 -tp 13 -clothesline 13 -inches 13 -casing 13 -butts 13 -mansion 13 -creates 13 -hamper 13 -involving 13 -topper 13 -vicinity 13 -vespa 13 -closest 13 -koala 13 -protector 13 -damp 13 -egret 13 -chevrolet 13 -bakers 13 -kraut 13 -copy 13 -corned 13 -teacup 13 -grid 13 -silk 13 -consuming 13 -gardening 13 -exchange 13 -flashes 13 -actual 13 -elf 13 -satin 13 -bracelet 13 -field.. 13 -remarkable 13 -sleeveless 13 -flooding 13 -bowing 13 -bulldozer 13 -logos 13 -rapidly 13 -ally 13 -brace 13 -sunken 13 -duplicate 13 -reeds 13 -atmosphere 13 -whisk 13 -headdress 13 -receiver 13 -catering 13 -descend 13 -plating 13 -mamma 13 -shrine 13 -saluting 13 -bracelets 13 -investigates 13 -trestle 13 -icons 13 -actually 13 -baseline 13 -one-way 13 -saddles 13 -draw 13 -measure 13 -exactly 13 -ample 13 -unseen 13 -warmer 13 -cricket 13 -3rd 13 -guardrail 13 -ban 13 -seattle 13 -staged 13 -mile 13 -obelisk 13 -mare 13 -grating 13 -awkwardly 13 -terminals 13 -cardigan 13 -wheelchairs 13 -googly 13 -calling 13 -zooms 13 -changed 13 -slider 13 -removes 13 -longer 13 -shinning 13 -exploring 13 -chugs 13 -attentive 13 -foyer 13 -cove 13 -calls 13 -executing 13 -sparkling 13 -doggie 13 -plywood 13 -c 13 -finds 13 -dive 13 -headlight 13 -sick 13 -telescope 12 -smirk 12 -extend 12 -obese 12 -professionally 12 -peson 12 -fisherman 12 -shawl 12 -postal 12 -tricky 12 -diapers 12 -scoops 12 -gooey 12 -seperated 12 -marquee 12 -paired 12 -chow 12 -popsicle 12 -asking 12 -tablets 12 -expansive 12 -taxidermy 12 -widow 12 -riverside 12 -sprinkling 12 -knelt 12 -trots 12 -patrolling 12 -kitesurfing 12 -deal 12 -whiskey 12 -windowed 12 -enthusiast 12 -arranges 12 -briefcases 12 -shattered 12 -creatures 12 -squeezed 12 -affectionately 12 -remain 12 -jetty 12 -scantily 12 -charm 12 -politician 12 -announcing 12 -rivers 12 -fetch 12 -vehicular 12 -grinder 12 -hide 12 -final 12 -seminar 12 -rims 12 -tress 12 -assembling 12 -donation 12 -two-story 12 -simulator 12 -egyptian 12 -sunbathers 12 -outcropping 12 -question 12 -breasts 12 -mules 12 -fowl 12 -catsup 12 -kettles 12 -successful 12 -diagonally 12 -listed 12 -danish 12 -cooktop 12 -venice 12 -scuba 12 -careful 12 -contrast 12 -gallon 12 -focusing 12 -casino 12 -panties 12 -swirl 12 -overripe 12 -documents 12 -arugula 12 -renovations 12 -ther 12 -airy 12 -tucks 12 -toothpick 12 -crepe 12 -streaks 12 -en 12 -document 12 -choo 12 -underway 12 -nearing 12 -barbwire 12 -unbaked 12 -magnet 12 -praying 12 -creation 12 -mustang 12 -processing 12 -nibbles 12 -handlebar 12 -posting 12 -grease 12 -boaters 12 -scrap 12 -id 12 -outline 12 -vinyl 12 -camouflaged 12 -bathe 12 -purchased 12 -stirs 12 -ferret 12 -recycling 12 -hugged 12 -rabbits 12 -artsy 12 -illuminates 12 -texts 12 -pf 12 -pm 12 -deployed 12 -lumber 12 -album 12 -slate 12 -man-made 12 -mating 12 -participate 12 -cubby 12 -crosstown 12 -'m 12 -burritos 12 -scratches 12 -vancouver 12 -inserted 12 -spotlight 12 -booster 12 -mash 12 -cruiser 12 -cruises 12 -enclosures 12 -scratch 12 -graffitied 12 -urinating 12 -zoomed 12 -ascends 12 -lightening 12 -frolic 12 -louis 12 -aimed 12 -wiener 12 -mack 12 -stamp 12 -pomegranate 12 -properly 12 -underbrush 12 -wih 12 -farmland 12 -gras 12 -powdery 12 -cemetary 12 -erect 12 -website 12 -onlooking 12 -crucifix 12 -jackson 12 -12 12 -bumping 12 -grip 12 -linoleum 12 -whimsical 12 -emptied 12 -wiped 12 -quesadilla 12 -winged 12 -sparrow 12 -tilts 12 -taxing 12 -mic 12 -artfully 12 -bustling 12 -messily 12 -original 12 -almond 12 -tows 12 -wardrobe 12 -elk 12 -54 
12 -navel 12 -convoy 12 -66 12 -pod 12 -ravioli 12 -ladders 12 -investigate 12 -suckling 12 -crepes 12 -faux 12 -scared 12 -passport 12 -similarly 12 -touched 12 -roofed 12 -ruffled 12 -warms 12 -dove 12 -slot 12 -jobs 12 -scottish 12 -cigar 12 -flannel 12 -scraps 12 -backsides 12 -toiler 12 -graphics 12 -tint 12 -simultaneously 12 -unkempt 12 -descent 12 -perimeter 12 -upturned 12 -stake 12 -trouble 12 -jousting 12 -lookers 12 -pastrami 12 -fliers 12 -grater 12 -multi-story 12 -scanner 12 -edited 12 -uniquely 12 -daisy 12 -fielder 12 -x 12 -yankees 12 -drag 12 -exits 12 -upset 12 -sustenance 12 -baord 12 -barefooted 12 -doorways 12 -dolphins 12 -peeping 12 -composite 12 -deco 12 -grade 12 -bold 12 -struggling 12 -spoonful 12 -expressing 12 -downed 12 -ranger 12 -rusting 12 -mowed 12 -adolescent 12 -ivory 12 -sprout 12 -isolated 12 -secured 12 -arrival 12 -vivid 12 -locking 12 -kill 12 -disabled 12 -cobblestones 12 -learns 12 -paisley 12 -moustache 12 -squeezing 12 -blacktop 12 -centre 12 -violet 12 -shabby 12 -talbe 11 -wand 11 -corporate 11 -peperoni 11 -role 11 -pecks 11 -rush 11 -adidas 11 -piping 11 -untouched 11 -extravagant 11 -infamous 11 -atlantic 11 -fifteen 11 -chilly 11 -greenhouse 11 -collide 11 -giraffee 11 -germany 11 -cylinder 11 -barbie 11 -death 11 -zipper 11 -assists 11 -carnations 11 -attachments 11 -darkly 11 -powerful 11 -trucked 11 -amp 11 -wakeboard 11 -goodbye 11 -llamas 11 -save 11 -skateboarded 11 -feminine 11 -bob 11 -lockers 11 -nearest 11 -funky 11 -tugging 11 -sites 11 -charcoal 11 -nypd 11 -varied 11 -y 11 -locomotives 11 -tranquil 11 -elizabeth 11 -detroit 11 -pinstripe 11 -beaded 11 -traveled 11 -daughters 11 -nick 11 -tack 11 -coordinating 11 -joy 11 -largest 11 -stir-fry 11 -writings 11 -tether 11 -sippy 11 -tigers 11 -breath 11 -rooftops 11 -resturant 11 -prominently 11 -curling 11 -carves 11 -al 11 -tapestry 11 -hued 11 -clutches 11 -loves 11 -labelled 11 -dismantled 11 -steet 11 -soar 11 -streamlined 11 -robotic 11 -footprints 11 -atrium 11 -knoll 11 -mechanic 11 -plater 11 -anime 11 -opener 11 -frown 11 -recorder 11 -houseboat 11 -browned 11 -scouts 11 -elder 11 -overlooked 11 -plugs 11 -sparklers 11 -attack 11 -judges 11 -rays 11 -dwelling 11 -dollop 11 -stoop 11 -folders 11 -windsurfers 11 -shampoo 11 -crabs 11 -overcoat 11 -splits 11 -auction 11 -dalmation 11 -merging 11 -beet 11 -teachers 11 -advantage 11 -1st 11 -courthouse 11 -atvs 11 -standstill 11 -idea 11 -applying 11 -anchovies 11 -protestors 11 -footed 11 -checkers 11 -vcr 11 -loses 11 -pills 11 -sinking 11 -browse 11 -fish-eye 11 -sombrero 11 -photographers 11 -innocent 11 -customized 11 -crusts 11 -pickled 11 -adjoining 11 -frosty 11 -sydney 11 -2012 11 -outlined 11 -mets 11 -manhole 11 -splayed 11 -mills 11 -ashore 11 -oakland 11 -enjoyed 11 -wi 11 -launches 11 -ripening 11 -happen 11 -truly 11 -pylons 11 -volkswagen 11 -suites 11 -algae 11 -soiled 11 -dull 11 -dinnerware 11 -data 11 -mixers 11 -tailgate 11 -pride 11 -reader 11 -subject 11 -scarfs 11 -whats 11 -stting 11 -largely 11 -ascent 11 -mixes 11 -24 11 -22 11 -mein 11 -purpose 11 -recreation 11 -grimaces 11 -spell 11 -completed 11 -stranded 11 -l-shaped 11 -tailed 11 -ruined 11 -nesting 11 -released 11 -jammed 11 -transports 11 -awful 11 -bookbag 11 -dine 11 -soaps 11 -unidentified 11 -bottoms 11 -blackberries 11 -pendant 11 -musicians 11 -crossbones 11 -wth 11 -watercraft 11 -spooning 11 -it.. 
11 -sprayer 11 -graduate 11 -unfurnished 11 -campsite 11 -colliding 11 -devil 11 -mlb 11 -elements 11 -chillin 11 -pour 11 -detailing 11 -afghan 11 -cosmetics 11 -concession 11 -waring 11 -renaissance 11 -corded 11 -spear 11 -scarecrow 11 -orchid 11 -southern 11 -sweeper 11 -puff 11 -grates 11 -selfies 11 -scallions 11 -fridges 11 -wharf 11 -cutlery 11 -purchasing 11 -joint 11 -dishing 11 -orphanage 11 -tapes 11 -sideline 11 -prepping 11 -vista 11 -jones 11 -accessory 11 -hippo 11 -pat 11 -lightning 11 -pensive 11 -geared 11 -5th 11 -dividing 11 -organization 11 -magnificent 11 -drenched 11 -huddling 11 -34th 11 -advanced 11 -essentials 11 -whirlpool 11 -corners 11 -raring 11 -whiskers 11 -siblings 11 -ladybug 11 -departure 11 -pirates 11 -cropped 11 -unwrapped 11 -spears 11 -sittin 11 -greets 11 -inlet 11 -gliders 11 -tiers 11 -boom 11 -woody 10 -crotch 10 -barstools 10 -feeling 10 -singer 10 -admires 10 -former 10 -sumo 10 -shutter 10 -prior 10 -installing 10 -intended 10 -emirates 10 -30 10 -scrubs 10 -canning 10 -promotional 10 -colts 10 -pomeranian 10 -bagged 10 -excitement 10 -compared 10 -croutons 10 -watermelons 10 -lollipop 10 -portraits 10 -sending 10 -fe 10 -scoreboard 10 -sledding 10 -upclose 10 -trapped 10 -intriguing 10 -handbags 10 -contending 10 -buddy 10 -likely 10 -met 10 -fitting 10 -toped 10 -shoveling 10 -journey 10 -vineyard 10 -awards 10 -protein 10 -woodland 10 -skilled 10 -spicy 10 -20th 10 -maps 10 -mmm 10 -gowns 10 -services 10 -mouthwash 10 -expert 10 -grainy 10 -task 10 -nordic 10 -reached 10 -settee 10 -welcoming 10 -snuggle 10 -excellent 10 -prosciutto 10 -carcass 10 -grandmother 10 -collars 10 -wakeboarder 10 -wagging 10 -animation 10 -trot 10 -believing 10 -skewered 10 -joker 10 -fender 10 -boxcars 10 -handrails 10 -well-made 10 -demon 10 -prince 10 -patchy 10 -pleased 10 -payphone 10 -joe 10 -stunning 10 -lookign 10 -attracts 10 -duvet 10 -looming 10 -dollars 10 -tear 10 -crumbled 10 -studies 10 -bordering 10 -prone 10 -representing 10 -madison 10 -brow 10 -thinks 10 -gallop 10 -fiving 10 -streaking 10 -determined 10 -lanyard 10 -pastor 10 -lei 10 -yawns 10 -see-through 10 -highlighted 10 -glares 10 -hosing 10 -tavern 10 -rickshaw 10 -cutouts 10 -dedicated 10 -reacting 10 -entirely 10 -pigtails 10 -cupola 10 -nerd 10 -foothills 10 -ultra 10 -dwarfed 10 -bugs 10 -since 10 -pup 10 -bridled 10 -flaming 10 -awnings 10 -... 10 -billows 10 -ice-cream 10 -modeled 10 -twist 10 -railways 10 -buckled 10 -adobe 10 -hoof 10 -pebbles 10 -modeling 10 -chilling 10 -clinging 10 -felt 10 -creatively 10 -multicolor 10 -borders 10 -amazed 10 -drainage 10 -take-out 10 -hybrid 10 -propellors 10 -beacon 10 -charms 10 -dribbles 10 -medium-sized 10 -tended 10 -envelopes 10 -ontop 10 -lampposts 10 -wholly 10 -footlong 10 -clippings 10 -brochures 10 -bakes 10 -growling 10 -plaster 10 -puddles 10 -standng 10 -shadowed 10 -surfboarders 10 -pearls 10 -clover 10 -r 10 -armored 10 -rimmed 10 -undone 10 -teach 10 -stoned 10 -parading 10 -stabbed 10 -lessons 10 -chain-link 10 -manmade 10 -scratched 10 -bumps 10 -wiring 10 -crusty 10 -fives 10 -authentic 10 -crt 10 -guinea 10 -dimensional 10 -eager 10 -whiteboard 10 -rubbish 10 -slip 10 -classy 10 -artichoke 10 -yells 10 -peal 10 -scones 10 -booties 10 -triumph 10 -posh 10 -usually 10 -slat 10 -mingling 10 -papered 10 -mysterious 10 -boxers 10 -convex 10 -zooming 10 -crowed 10 -gatorade 10 -drizzle 10 -dr. 
10 -airfrance 10 -unlit 10 -fascinating 10 -loader 10 -decrepit 10 -delight 10 -11 10 -monochrome 10 -efficient 10 -dice 10 -plump 10 -feeders 10 -general 10 -sewer 10 -caked 10 -f 10 -seashells 10 -vegetated 10 -hutch 10 -fluid 10 -interacts 10 -flaps 10 -proceeding 10 -civil 10 -stomachs 10 -etched 10 -decadent 10 -flyer 10 -rent 10 -pennsylvania 10 -slushy 10 -motorcylce 10 -finding 10 -flows 10 -cheer 10 -wonderland 10 -graham 10 -wrestle 10 -duster 10 -sin 10 -tortoiseshell 10 -grassing 10 -oversize 10 -flutes 10 -blt 10 -chevy 10 -bureau 10 -juts 10 -grumpy 10 -trimmings 10 -partaking 10 -noon 10 -netbook 10 -refueling 10 -klm 10 -fulled 10 -kneel 10 -snail 10 -charity 10 -appealing 10 -paul 10 -slowing 10 -valve 10 -worm 10 -failing 10 -occupants 10 -snows 10 -vegan 10 -goldfish 10 -instructional 10 -cross-legged 10 -notice 10 -wreaths 10 -clippers 10 -kneepads 10 -lobsters 10 -fathers 10 -leeks 10 -recessed 10 -sundown 10 -kickstand 10 -michigan 10 -smartphones 10 -westminster 10 -refreshments 10 -segments 10 -vodka 10 -definitely 10 -mild 10 -checkout 10 -avoiding 10 -soul 10 -maid 10 -pumping 10 -disassembled 10 -ranges 10 -rye 10 -bleak 10 -lookout 10 -pomegranates 10 -raise 10 -tab 10 -bones 10 -ion 10 -handset 10 -cookbook 10 -changes 10 -sandwhiches 10 -relatively 10 -hearth 10 -redheaded 10 -quantity 10 -flakes 10 -swords 10 -lacy 10 -cubicles 10 -sandwiched 10 -cartoons 10 -nutella 10 -drooping 10 -marmalade 10 -wishes 10 -toasters 10 -superimposed 10 -thousands 10 -discuss 10 -stern 9 -fin 9 -bannister 9 -mache 9 -troll 9 -complicated 9 -grandma 9 -ona 9 -washbasin 9 -tiki 9 -engages 9 -beware 9 -civilians 9 -exposing 9 -chipping 9 -saint 9 -compass 9 -sips 9 -suburbs 9 -cheddar 9 -boutonniere 9 -saucepan 9 -carriers 9 -disembark 9 -mid-flight 9 -zookeeper 9 -pasted 9 -heart-shaped 9 -lcd 9 -furnishing 9 -conventional 9 -photoshop 9 -remnants 9 -hundred 9 -sturdy 9 -hoisted 9 -kart 9 -lived 9 -wet-suit 9 -redone 9 -salvation 9 -newlywed 9 -advertised 9 -gain 9 -highest 9 -operate 9 -caterpillar 9 -warped 9 -congratulating 9 -gourds 9 -squad 9 -queens 9 -ratchet 9 -strain 9 -assist 9 -expressions 9 -puffs 9 -treetops 9 -shapped 9 -valentine 9 -chart 9 -pamphlet 9 -lacrosse 9 -easter 9 -gallops 9 -stret 9 -later 9 -pugs 9 -staning 9 -rotating 9 -homeplate 9 -antlers 9 -caddy 9 -rushes 9 -unzipped 9 -bail 9 -lavishly 9 -flushed 9 -faster 9 -companions 9 -refrigerated 9 -escorting 9 -icon 9 -cheap 9 -ironic 9 -delectable 9 -patrick 9 -list 9 -sum 9 -flap 9 -untied 9 -9 9 -pipeline 9 -fireplug 9 -fron 9 -freezers 9 -footstool 9 -electricity 9 -daybed 9 -hurt 9 -multi-color 9 -paintbrush 9 -retaining 9 -startled 9 -piercings 9 -nachos 9 -calfs 9 -carpets 9 -wondering 9 -wrenches 9 -kong 9 -padding 9 -surgery 9 -accept 9 -pant 9 -bouquets 9 -equestrians 9 -send 9 -twine 9 -drill 9 -thirteen 9 -spill 9 -pines 9 -firehydrant 9 -fancily 9 -co 9 -volvo 9 -forty 9 -transformed 9 -cranberries 9 -lily 9 -prom 9 -lunches 9 -tackling 9 -observers 9 -struck 9 -operator 9 -hogs 9 -knick-knacks 9 -won 9 -grills 9 -trucking 9 -wizard 9 -experiment 9 -pools 9 -kiddie 9 -suzuki 9 -hi 9 -rights 9 -logging 9 -playstation 9 -dunking 9 -fort 9 -brownish 9 -shave 9 -pain 9 -beachfront 9 -framing 9 -munch 9 -dipped 9 -prohibiting 9 -lo 9 -bespectacled 9 -citizen 9 -beaten 9 -floret 9 -weathervane 9 -pinkish 9 -goo 9 -steamboat 9 -soups 9 -[ 9 -aircrafts 9 -feathered 9 -pre 9 -southwestern 9 -regarding 9 -kingdom 9 -provided 9 -clams 9 -included 9 -belonging 9 -circuit 9 -mini-fridge 9 -toga 9 
-ergonomic 9 -tub/shower 9 -usual 9 -plaques 9 -propane 9 -oceans 9 -specially 9 -example 9 -mimicking 9 -layout 9 -contemplates 9 -textbook 9 -downwards 9 -armrest 9 -artichokes 9 -installation 9 -hes 9 -te 9 -crests 9 -brim 9 -decoratively 9 -candlelight 9 -microsoft 9 -hyde 9 -slid 9 -meant 9 -utilizing 9 -coffe 9 -irish 9 -regal 9 -hors 9 -nozzle 9 -cornbread 9 -sadly 9 -boxcar 9 -webcam 9 -mitten 9 -applied 9 -boars 9 -boxer 9 -eyeing 9 -juices 9 -forms 9 -sauteed 9 -kickflip 9 -screwdriver 9 -chestnut 9 -magenta 9 -peeing 9 -collected 9 -cheerful 9 -shred 9 -emptying 9 -spaniel 9 -fierce 9 -owls 9 -backlit 9 -photographic 9 -dutch 9 -downstairs 9 -culinary 9 -arial 9 -archways 9 -dances 9 -dancer 9 -uninstalled 9 -disks 9 -35 9 -blizzard 9 -messenger 9 -croquet 9 -double-decked 9 -nudging 9 -chewed 9 -55 9 -successfully 9 -illuminating 9 -coop 9 -individually 9 -releases 9 -marathon 9 -dimly-lit 9 -sculpted 9 -densely 9 -protrudes 9 -swirly 9 -markets 9 -wrestler 9 -inflated 9 -make-up 9 -consist 9 -mock 9 -demonstrates 9 -smith 9 -twirling 9 -campfire 9 -fluorescent 9 -linger 9 -granny 9 -reporter 9 -robin 9 -until 9 -murals 9 -shirted 9 -stamps 9 -crescent 9 -silos 9 -latte 9 -routes 9 -williams 9 -wall-mounted 9 -canon 9 -exhausted 9 -strand 9 -blues 9 -blackboard 9 -manger 9 -tart 9 -depiction 9 -informational 9 -chance 9 -tickets 9 -poppy 9 -walkie 9 -riverboat 9 -las 9 -tangle 9 -medley 9 -xbox 9 -vegas 9 -aggressively 9 -bowler 9 -fascinated 9 -crumb 9 -dummies 9 -adjustable 9 -cautiously 9 -nad 9 -enclose 9 -medals 9 -winner 9 -cabbages 9 -jams 9 -blackened 9 -shaves 9 -vitamin 9 -mexico 9 -abundant 9 -technique 9 -styling 9 -earbuds 9 -wrangling 9 -cylindrical 9 -fading 9 -jumpsuit 9 -directed 9 -bleeding 9 -uncomfortable 9 -dam 9 -photo-shopped 9 -curves 9 -appearance 9 -helped 9 -swimsuits 9 -bobble 9 -walkways 9 -executes 9 -arizona 9 -icicles 9 -unpainted 9 -caravan 9 -playroom 9 -beanbag 9 -clicking 9 -lorry 9 -persian 9 -outcrop 9 -george 9 -tarts 9 -viewpoint 9 -wintry 9 -barges 9 -canister 9 -kilt 9 -comforters 9 -britain 9 -riderless 9 -smal 9 -protects 9 -hil 9 -footpath 9 -contrail 9 -safely 9 -shoving 9 -wile 9 -blue-and-white 9 -slug 9 -climber 8 -pinto 8 -grapefruits 8 -mangos 8 -grooms 8 -triangles 8 -stays 8 -minute 8 -queue 8 -pouches 8 -vision 8 -formed 8 -rags 8 -experience 8 -trellis 8 -weedy 8 -tummy 8 -awake 8 -sundaes 8 -span 8 -disheveled 8 -sweaty 8 -thigh 8 -matress 8 -vying 8 -tented 8 -wed 8 -pressure 8 -separates 8 -hong 8 -cashews 8 -scrubber 8 -traffice 8 -alertly 8 -macintosh 8 -rind 8 -kabob 8 -bazaar 8 -uneven 8 -phrase 8 -predators 8 -housed 8 -unfolded 8 -spouting 8 -application 8 -turbines 8 -timey 8 -sheriff 8 -examined 8 -cocoa 8 -finch 8 -grille 8 -niche 8 -passageway 8 -digs 8 -annoyed 8 -meets 8 -unicorn 8 -yes 8 -hatchback 8 -holly 8 -lieing 8 -again 8 -taht 8 -development 8 -valves 8 -nobody 8 -hen 8 -bails 8 -screensaver 8 -burns 8 -obscene 8 -lever 8 -eat-in 8 -spirit 8 -relief 8 -fillings 8 -qantas 8 -off-road 8 -susan 8 -necked 8 -heaped 8 -sporty 8 -meaning 8 -sanctioned 8 -bonnet 8 -workbench 8 -fifties 8 -lakeside 8 -draining 8 -nourishment 8 -sash 8 -organizer 8 -shores 8 -blinder 8 -victoria 8 -attendees 8 -trampoline 8 -hyrdrant 8 -servicing 8 -runways 8 -diverse 8 -taught 8 -gong 8 -ar 8 -neglected 8 -drifting 8 -checkpoint 8 -dandelion 8 -red-haired 8 -hoods 8 -bi 8 -outwards 8 -boeing 8 -rotunda 8 -apricots 8 -sleds 8 -paragliding 8 -indifferent 8 -plentiful 8 -flagpole 8 -dumped 8 -stroke 8 -gingerbread 8 -mold 8 
-margarita 8 -clumps 8 -barrow 8 -grimacing 8 -foaming 8 -ballpark 8 -comics 8 -cartons 8 -nerf 8 -gaining 8 -flattened 8 -batches 8 -dribbling 8 -laminate 8 -bass 8 -ash 8 -passersby 8 -barack 8 -trashcans 8 -latin 8 -horrible 8 -harbour 8 -made-up 8 -acoustic 8 -matter 8 -doves 8 -judged 8 -firsbee 8 -iv 8 -foilage 8 -commuting 8 -effort 8 -pure 8 -designer 8 -earlier 8 -marijuana 8 -personalized 8 -cacti 8 -girlfriend 8 -milks 8 -pleasure 8 -to-go 8 -tender 8 -examples 8 -pee 8 -chaps 8 -moderate 8 -harvest 8 -ref 8 -balck 8 -patting 8 -mooring 8 -shelfs 8 -variations 8 -jeeps 8 -draught 8 -wood-paneled 8 -harper 8 -arrayed 8 -wound 8 -carnation 8 -specific 8 -plantain 8 -core 8 -nd 8 -sharpening 8 -hashbrowns 8 -ann 8 -counter-top 8 -woolen 8 -runaway 8 -devoid 8 -ok 8 -oj 8 -od 8 -braided 8 -duo 8 -decaying 8 -martini 8 -beast 8 -demonstrate 8 -tastefully 8 -hyenas 8 -instructs 8 -peripheral 8 -simulated 8 -soccor 8 -s. 8 -beetle 8 -squared 8 -moldy 8 -kennel 8 -turbine 8 -toiletry 8 -tomatos 8 -reporters 8 -intercept 8 -souffle 8 -cabins 8 -jutting 8 -clipping 8 -garnishment 8 -clinton 8 -ferries 8 -shetland 8 -2013 8 -minimalist 8 -astride 8 -cinder 8 -sibling 8 -calculators 8 -negative 8 -movable 8 -farming 8 -exercises 8 -droplets 8 -smoothies 8 -becoming 8 -brothers 8 -supported 8 -roaring 8 -bodyboarding 8 -slam 8 -mandarin 8 -chinatown 8 -sidwalk 8 -shoelace 8 -motorhome 8 -booty 8 -applesauce 8 -cheeks 8 -clouded 8 -peter 8 -barbershop 8 -ward 8 -cookbooks 8 -disco 8 -tastes 8 -clings 8 -raccoon 8 -emerges 8 -sensor 8 -ceramics 8 -plume 8 -decks 8 -oin 8 -ascend 8 -erase 8 -sophisticated 8 -appreciating 8 -component 8 -comparing 8 -ensemble 8 -dentist 8 -avatar 8 -clipboard 8 -inthe 8 -swooping 8 -lama 8 -safeway 8 -toting 8 -volunteer 8 -muscle 8 -masked 8 -thinly 8 -functional 8 -biggest 8 -fabulous 8 -delicacy 8 -plats 8 -splashed 8 -bandages 8 -rolex 8 -attempted 8 -promoting 8 -spools 8 -drier 8 -hooves 8 -passangers 8 -scrubby 8 -d.c 8 -elongated 8 -known 8 -artful 8 -developed 8 -suspicious 8 -mommy 8 -vice 8 -ripeness 8 -sil 8 -badges 8 -july 8 -catholic 8 -snacking 8 -bronco 8 -bedspreads 8 -stowed 8 -talkie 8 -squeeze 8 -whether 8 -30th 8 -radish 8 -printing 8 -monogrammed 8 -glossy 8 -spokes 8 -twists 8 -wristband 8 -hangers 8 -modem 8 -prepped 8 -sleeved 8 -projects 8 -continues 8 -manipulated 8 -slabs 8 -crayons 8 -windsurfs 8 -lattice 8 -odds 8 -cubical 8 -recognizable 8 -receives 8 -wispy 8 -braves 8 -brave 8 -courch 8 -take-off 8 -wigs 8 -bullpen 8 -whatever 8 -loosened 8 -booklet 8 -addresses 8 -poem 8 -skill 8 -intersections 8 -technician 8 -farmhouse 8 -blindfold 8 -swaddled 8 -ending 8 -said 8 -tucking 8 -smoked 8 -floaters 8 -tartar 8 -nectar 8 -alto 8 -creams 8 -q 8 -thank 8 -escort 8 -answering 8 -especially 8 -gracefully 8 -extensive 8 -hyrdant 8 -jumble 8 -fueling 8 -tipping 8 -afar 8 -swirled 8 -consume 8 -lilacs 8 -mower 8 -50th 8 -sheers 8 -drape 8 -frizbe 8 -executive 8 -mints 8 -punching 8 -chargers 8 -guinness 8 -facet 8 -jello 8 -advertise 8 -tim 8 -memory 8 -realistic 8 -ruck 8 -hull 8 -cockatoo 8 -mallet 8 -flask 8 -waxed 8 -khakis 8 -bard 8 -stitched 8 -manipulating 8 -paused 8 -tap 8 -barns 8 -reindeer 8 -gnawing 8 -wrought-iron 7 -dinosaurs 7 -tulip 7 -effects 7 -tallest 7 -honk 7 -open-air 7 -exact 7 -talbot 7 -consoles 7 -webpage 7 -milked 7 -majestically 7 -situation 7 -edged 7 -singapore 7 -stagecoach 7 -foreheads 7 -corrected 7 -stylus 7 -operation 7 -carry-on 7 -removable 7 -motors 7 -and/or 7 -wrinkles 7 -clasping 7 -bred 7 -newest 7 
-problem 7 -outlets 7 -gardens 7 -wii-mote 7 -indicated 7 -convenient 7 -disembarking 7 -cost 7 -puffed 7 -chic 7 -barnyard 7 -stooping 7 -thumbs-up 7 -battleship 7 -anytime 7 -mittens 7 -imitating 7 -cream-colored 7 -manhattan 7 -avid 7 -cradle 7 -sorry 7 -berlin 7 -budding 7 -giraff 7 -houseplants 7 -subs 7 -meander 7 -bog 7 -fingertips 7 -stitch 7 -vender 7 -malaysian 7 -spans 7 -coco 7 -beachside 7 -alfalfa 7 -preserver 7 -compound 7 -needing 7 -obstructed 7 -drifts 7 -spikes 7 -concoction 7 -treeline 7 -composed 7 -prizes 7 -amazon 7 -flair 7 -approached 7 -recess 7 -monopoly 7 -boundary 7 -widescreen 7 -sisters 7 -teeshirt 7 -classical 7 -domino 7 -trams 7 -flotation 7 -pounce 7 -leaguer 7 -keypad 7 -pedaling 7 -columned 7 -rom 7 -entertain 7 -witch 7 -plying 7 -grime 7 -manikin 7 -stroking 7 -cupping 7 -director 7 -donations 7 -googles 7 -vespas 7 -binders 7 -friday 7 -sole 7 -wastebasket 7 -impressed 7 -dragged 7 -wording 7 -explore 7 -antennae 7 -ringed 7 -leathers 7 -buddhist 7 -usage 7 -sterile 7 -dangled 7 -eatting 7 -tequila 7 -gauge 7 -gauze 7 -crutches 7 -protester 7 -forage 7 -inscribed 7 -steaks 7 -obscures 7 -breasted 7 -misshapen 7 -sideburns 7 -cheers 7 -barbecued 7 -puzzled 7 -brimmed 7 -slipper 7 -grace 7 -condom 7 -firm 7 -moto 7 -primarily 7 -corsage 7 -code 7 -sharply 7 -trumpet 7 -allot 7 -merge 7 -straining 7 -colleagues 7 -hops 7 -email 7 -atlanta 7 -paving 7 -unplugged 7 -expo 7 -funnel 7 -harvested 7 -glimpse 7 -bunnies 7 -hummus 7 -themself 7 -pinstriped 7 -depict 7 -eps 7 -lapsed 7 -grizzle 7 -pays 7 -physical 7 -trial 7 -breeds 7 -skidding 7 -multi-tiered 7 -vert 7 -spaceous 7 -stand-up 7 -hurry 7 -bridles 7 -corgi 7 -brussels 7 -heritage 7 -politicians 7 -baron 7 -click 7 -sprinkler 7 -approval 7 -addition 7 -brunch 7 -illusion 7 -gerbil 7 -pilled 7 -onstage 7 -braids 7 -motorist 7 -sanitizer 7 -clementine 7 -staples 7 -insides 7 -sunday 7 -pint 7 -thorny 7 -cracks 7 -rancher 7 -disconnected 7 -dole 7 -roams 7 -tortoise 7 -del 7 -stepped 7 -luke 7 -pagoda 7 -spend 7 -roping 7 -severely 7 -drank 7 -announcer 7 -afield 7 -panes 7 -time-lapse 7 -wide-eyed 7 -sheeted 7 -ambulances 7 -hot-dog 7 -sharpie 7 -zipping 7 -technological 7 -intertwined 7 -garnishes 7 -nw 7 -partitions 7 -ballroom 7 -granddaughter 7 -treading 7 -hub 7 -dandelions 7 -ot 7 -simpsons 7 -mission 7 -modernized 7 -unhealthy 7 -tarps 7 -knickknacks 7 -birch 7 -joining 7 -steve 7 -disposal 7 -puppets 7 -makings 7 -steams 7 -sparrows 7 -junky 7 -motorcyles 7 -incredibly 7 -cutters 7 -gelato 7 -showed 7 -amusing 7 -landmark 7 -skimpy 7 -bid 7 -wiht 7 -glacier 7 -multistory 7 -parachuting 7 -brilliant 7 -diagram 7 -zippered 7 -fleece 7 -fishermen 7 -coveralls 7 -fetching 7 -continue 7 -wipe 7 -pawn 7 -brahma 7 -presidential 7 -trike 7 -bonds 7 -basking 7 -coals 7 -slathered 7 -catamaran 7 -confinement 7 -motocycle 7 -highlights 7 -sprints 7 -w. 
7 -aim 7 -launched 7 -farther 7 -/ 7 -division 7 -gnarly 7 -cal 7 -flank 7 -dread 7 -fifty 7 -cannon 7 -skatboard 7 -partially-eaten 7 -urine 7 -reason 7 -overexposed 7 -diorama 7 -chiquita 7 -invisible 7 -jeff 7 -scrunched 7 -smells 7 -fishnet 7 -hitching 7 -maze 7 -cartoonish 7 -holstein 7 -partners 7 -eccentric 7 -poultry 7 -astounding 7 -inset 7 -haze 7 -somber 7 -programming 7 -overtaken 7 -stoic 7 -girraffe 7 -sown 7 -decided 7 -valentines 7 -manager 7 -battling 7 -strokes 7 -listing 7 -cough 7 -broadcast 7 -13 7 -pill 7 -grin 7 -bible 7 -escorted 7 -slips 7 -curio 7 -jointly 7 -furnace 7 -bluff 7 -venturing 7 -kilts 7 -shallows 7 -binoculars 7 -adn 7 -hitch 7 -stride 7 -eyeballs 7 -vapor 7 -glances 7 -roles 7 -cultural 7 -malaysia 7 -crews 7 -gin 7 -begs 7 -bareback 7 -turntable 7 -persona 7 -informing 7 -crush 7 -knows 7 -wrestles 7 -explains 7 -saloon 7 -refrigeration 7 -literature 7 -baggy 7 -ottomans 7 -dramatically 7 -anywhere 7 -haight 7 -horizontally 7 -frontier 7 -swift 7 -coached 7 -cocker 7 -artifacts 7 -chugging 7 -2nd 7 -ease 7 -bluebird 7 -offer 7 -unaware 7 -peope 7 -bluish 7 -ridiculous 7 -rained 7 -driftwood 7 -alfredo 7 -doodling 7 -philadelphia 7 -dispensing 7 -darker 7 -snowball 7 -brooklyn 7 -buoys 7 -arching 7 -sandal 7 -zip 7 -servers 7 -skimming 7 -girrafe 7 -centers 7 -purposes 7 -pieced 7 -autograph 7 -derelict 7 -autographed 7 -biplanes 7 -familiar 7 -hauled 7 -jalapeno 7 -beginner 7 -saver 7 -chapel 7 -pierced 7 -shear 7 -network 7 -sunning 7 -licked 7 -ted 7 -onboard 7 -four-wheeler 7 -dreads 7 -online 7 -busily 7 -title 7 -adjusted 7 -cosmetic 7 -terrible 7 -philly 7 -sponsored 7 -collectible 7 -ballon 7 -u-turn 7 -bleacher 7 -craggy 7 -helicopters 7 -black-faced 7 -coating 7 -glazing 7 -spouts 7 -maintain 7 -dougnuts 7 -hoody 7 -tye 7 -pinning 7 -smokey 7 -die 7 -useful 7 -coaching 7 -correct 7 -pained 7 -organizing 7 -celebrity 7 -nineteen 7 -frilly 7 -bathes 7 -crawls 7 -trees.. 
[... remainder of the deleted word-frequency vocabulary entries from examples/text_generation/data/word_counts.txt elided ...]
\ No newline at end of file
diff --git a/examples/text_generation/tutorial_generate_text.py b/examples/text_generation/tutorial_generate_text.py
deleted file mode 100644
index f17440b62..000000000
--- a/examples/text_generation/tutorial_generate_text.py
+++ /dev/null
@@ -1,332 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright 2019 TensorLayer. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Example of Synced sequence input and output.
-
-Generate text using LSTM.
- -Data: https://github.com/tensorlayer/tensorlayer/tree/master/example/data/ - -""" - -import os -import re -import time - -import nltk -import numpy as np -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer.layers import * -from tensorlayer.models import Model - -tl.logging.set_verbosity(tl.logging.DEBUG) - -_UNK = "_UNK" - - -def basic_clean_str(string): - """Tokenization/string cleaning for a datasets.""" - string = re.sub(r"\n", " ", string) # '\n' --> ' ' - string = re.sub(r"\'s", " \'s", string) # it's --> it 's - string = re.sub(r"\’s", " \'s", string) - string = re.sub(r"\'ve", " have", string) # they've --> they have - string = re.sub(r"\’ve", " have", string) - string = re.sub(r"\'t", " not", string) # can't --> can not - string = re.sub(r"\’t", " not", string) - string = re.sub(r"\'re", " are", string) # they're --> they are - string = re.sub(r"\’re", " are", string) - string = re.sub(r"\'d", "", string) # I'd (I had, I would) --> I - string = re.sub(r"\’d", "", string) - string = re.sub(r"\'ll", " will", string) # I'll --> I will - string = re.sub(r"\’ll", " will", string) - string = re.sub(r"\“", " ", string) # “a” --> “ a ” - string = re.sub(r"\”", " ", string) - string = re.sub(r"\"", " ", string) # "a" --> " a " - string = re.sub(r"\'", " ", string) # they' --> they ' - string = re.sub(r"\’", " ", string) # they’ --> they ’ - string = re.sub(r"\.", " . ", string) # they. --> they . - string = re.sub(r"\,", " , ", string) # they, --> they , - string = re.sub(r"\!", " ! ", string) - string = re.sub(r"\-", " ", string) # "low-cost"--> lost cost - string = re.sub(r"\(", " ", string) # (they) --> ( they) - string = re.sub(r"\)", " ", string) # ( they) --> ( they ) - string = re.sub(r"\]", " ", string) # they] --> they ] - string = re.sub(r"\[", " ", string) # they[ --> they [ - string = re.sub(r"\?", " ", string) # they? --> they ? 
- string = re.sub(r"\>", " ", string) # they> --> they > - string = re.sub(r"\<", " ", string) # they< --> they < - string = re.sub(r"\=", " ", string) # easier= --> easier = - string = re.sub(r"\;", " ", string) # easier; --> easier ; - string = re.sub(r"\;", " ", string) - string = re.sub(r"\:", " ", string) # easier: --> easier : - string = re.sub(r"\"", " ", string) # easier" --> easier " - string = re.sub(r"\$", " ", string) # $380 --> $ 380 - string = re.sub(r"\_", " ", string) # _100 --> _ 100 - string = re.sub(r"\s{2,}", " ", string) # Akara is handsome --> Akara is handsome - return string.strip().lower() # lowercase - - -def customized_clean_str(string): - """Tokenization/string cleaning for a datasets.""" - string = re.sub(r"\n", " ", string) # '\n' --> ' ' - string = re.sub(r"\'s", " \'s", string) # it's --> it 's - string = re.sub(r"\’s", " \'s", string) - string = re.sub(r"\'ve", " have", string) # they've --> they have - string = re.sub(r"\’ve", " have", string) - string = re.sub(r"\'t", " not", string) # can't --> can not - string = re.sub(r"\’t", " not", string) - string = re.sub(r"\'re", " are", string) # they're --> they are - string = re.sub(r"\’re", " are", string) - string = re.sub(r"\'d", "", string) # I'd (I had, I would) --> I - string = re.sub(r"\’d", "", string) - string = re.sub(r"\'ll", " will", string) # I'll --> I will - string = re.sub(r"\’ll", " will", string) - string = re.sub(r"\“", " “ ", string) # “a” --> “ a ” - string = re.sub(r"\”", " ” ", string) - string = re.sub(r"\"", " “ ", string) # "a" --> " a " - string = re.sub(r"\'", " ' ", string) # they' --> they ' - string = re.sub(r"\’", " ' ", string) # they’ --> they ' - string = re.sub(r"\.", " . ", string) # they. --> they . - string = re.sub(r"\,", " , ", string) # they, --> they , - string = re.sub(r"\-", " ", string) # "low-cost"--> lost cost - string = re.sub(r"\(", " ( ", string) # (they) --> ( they) - string = re.sub(r"\)", " ) ", string) # ( they) --> ( they ) - string = re.sub(r"\!", " ! ", string) # they! --> they ! - string = re.sub(r"\]", " ] ", string) # they] --> they ] - string = re.sub(r"\[", " [ ", string) # they[ --> they [ - string = re.sub(r"\?", " ? ", string) # they? --> they ? - string = re.sub(r"\>", " > ", string) # they> --> they > - string = re.sub(r"\<", " < ", string) # they< --> they < - string = re.sub(r"\=", " = ", string) # easier= --> easier = - string = re.sub(r"\;", " ; ", string) # easier; --> easier ; - string = re.sub(r"\;", " ; ", string) - string = re.sub(r"\:", " : ", string) # easier: --> easier : - string = re.sub(r"\"", " \" ", string) # easier" --> easier " - string = re.sub(r"\$", " $ ", string) # $380 --> $ 380 - string = re.sub(r"\_", " _ ", string) # _100 --> _ 100 - string = re.sub(r"\s{2,}", " ", string) # Akara is handsome --> Akara is handsome - return string.strip().lower() # lowercase - - -def customized_read_words(input_fpath): # , dictionary): - with open(input_fpath, "r", encoding="utf8") as f: - words = f.read() - # Clean the data - words = customized_clean_str(words) - # Split each word - return words.split() - - -def main_restore_embedding_layer(): - """How to use Embedding layer, and how to convert IDs to vector, - IDs to words, etc. - """ - # Step 1: Build the embedding matrix and load the existing embedding matrix. 
- vocabulary_size = 50000 - embedding_size = 128 - model_file_name = "model_word2vec_50k_128" - batch_size = None - - if not os.path.exists(model_file_name + ".npy"): - raise Exception( - "Pretrained embedding matrix not found. " - "Hint: Please pre-train the default model in " - "`examples/text_word_embedding/tutorial_word2vec_basic.py`." - ) - - print("Load existing embedding matrix and dictionaries") - all_var = tl.files.load_npy_to_any(name=model_file_name + '.npy') - data = all_var['data'] - count = all_var['count'] - dictionary = all_var['dictionary'] - reverse_dictionary = all_var['reverse_dictionary'] - - tl.nlp.save_vocab(count, name='vocab_' + model_file_name + '.txt') - - del all_var, data, count - - class Embedding_Model(Model): - - def __init__(self): - super(Embedding_Model, self).__init__() - self.embedding = Embedding(vocabulary_size, embedding_size) - - def forward(self, inputs): - return self.embedding(inputs) - - model = Embedding_Model() - model.eval() - - # TODO: assign certain parameters to model - model.load_weights(model_file_name + ".hdf5", skip=True, in_order=False) - - # Step 2: Input word(s), output the word vector(s). - word = 'hello' - word_id = dictionary[word] - print('word_id:', word_id) - - words = ['i', 'am', 'tensor', 'layer'] - word_ids = tl.nlp.words_to_word_ids(words, dictionary, _UNK) - context = tl.nlp.word_ids_to_words(word_ids, reverse_dictionary) - print('word_ids:', word_ids) - print('context:', context) - - vector = model(word_id) - print('vector:', vector.shape) - print(vector) - - vectors = model(word_ids) - print('vectors:', vectors.shape) - print(vectors) - - -class Text_Generation_Net(Model): - - def __init__(self, vocab_size, hidden_size, init): - super(Text_Generation_Net, self).__init__() - - self.embedding = Embedding(vocab_size, hidden_size, init, name='embedding') - self.lstm = tl.layers.RNN( - cell=tf.keras.layers.LSTMCell(hidden_size), return_last_output=False, return_last_state=True, - return_seq_2d=True, in_channels=hidden_size - ) - self.out_dense = Dense(vocab_size, in_channels=hidden_size, W_init=init, b_init=init, act=None, name='output') - - def forward(self, inputs, initial_state=None): - embedding_vector = self.embedding(inputs) - lstm_out, final_state = self.lstm(embedding_vector, initial_state=initial_state) - logits = self.out_dense(lstm_out) - return logits, final_state - - -def main_lstm_generate_text(): - """Generate text by Synced sequence input and output.""" - # rnn model and update (describtion: see tutorial_ptb_lstm.py) - init_scale = 0.1 - learning_rate = 1e-3 - sequence_length = 20 - hidden_size = 200 - max_epoch = 100 - batch_size = 16 - - top_k_list = [1, 3, 5, 10] - print_length = 30 - - model_file_name = "model_generate_text.hdf5" - - # ===== Prepare Data - words = customized_read_words(input_fpath="data/trump/trump_text.txt") - - vocab = tl.nlp.create_vocab([words], word_counts_output_file='vocab.txt', min_word_count=1) - vocab = tl.nlp.Vocabulary('vocab.txt', unk_word="") - vocab_size = vocab.unk_id + 1 - train_data = [vocab.word_to_id(word) for word in words] - - # Set the seed to generate sentence. 
- seed = "it is a" - # seed = basic_clean_str(seed).split() - seed = nltk.tokenize.word_tokenize(seed) - print('seed : %s' % seed) - - init = tl.initializers.random_uniform(-init_scale, init_scale) - - net = Text_Generation_Net(vocab_size, hidden_size, init) - - train_weights = net.trainable_weights - optimizer = tf.optimizers.Adam(lr=learning_rate) - - # ===== Training - - print("\nStart learning a model to generate text") - for i in range(max_epoch): - - print("Epoch: %d/%d" % (i + 1, max_epoch)) - epoch_size = ((len(train_data) // batch_size) - 1) // sequence_length - - start_time = time.time() - costs = 0.0 - iters = 0 - - net.train() - # reset all states at the begining of every epoch - lstm_state = None - for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, sequence_length)): - with tf.GradientTape() as tape: - ## compute outputs - logits, lstm_state = net(x, initial_state=lstm_state) - ## compute loss and update model - cost = tl.cost.cross_entropy(logits, tf.reshape(y, [-1]), name='train_loss') - - grad = tape.gradient(cost, train_weights) - optimizer.apply_gradients(zip(grad, train_weights)) - - costs += cost - iters += 1 - - if step % (epoch_size // 10) == 1: - print( - "%.3f perplexity: %.3f speed: %.0f wps" % ( - step * 1.0 / epoch_size, np.exp(costs / iters), - iters * batch_size * sequence_length * batch_size / (time.time() - start_time) - ) - ) - train_perplexity = np.exp(costs / iters) - # print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity)) - print("Epoch: %d/%d Train Perplexity: %.3f" % (i + 1, max_epoch, train_perplexity)) - - net.eval() - # for diversity in diversity_list: - # testing: sample from top k words - for top_k in top_k_list: - # Testing, generate some text from a given seed. - lstm_state = None - outs_id = [vocab.word_to_id(w) for w in seed] - # feed the seed to initialize the state for generation. - for ids in outs_id[:-1]: - a_id = np.asarray(ids).reshape(1, 1) - _, lstm_state = net(a_id, initial_state=lstm_state) - - # feed the last word in seed, and start to generate sentence. - a_id = outs_id[-1] - for _ in range(print_length): - a_id = np.asarray(a_id).reshape(1, 1) - logits, lstm_state = net(a_id, initial_state=lstm_state) - out = tf.nn.softmax(logits) - # Without sampling - # a_id = np.argmax(out[0]) - # Sample from all words, if vocab_size is large, - # this may have numeric error. - # a_id = tl.nlp.sample(out[0], diversity) - # Sample from the top k words. - a_id = tl.nlp.sample_top(out[0].numpy(), top_k=top_k) - outs_id.append(a_id) - sentence = [vocab.id_to_word(w) for w in outs_id] - sentence = " ".join(sentence) - # print(diversity, ':', sentence) - print(top_k, ':', sentence) - - print("Save model") - net.save_weights(model_file_name) - - -if __name__ == '__main__': - # Restore a pretrained embedding matrix - # main_restore_embedding_layer() - - # How to generate text from a given context - main_lstm_generate_text() diff --git a/examples/text_ptb/README.md b/examples/text_ptb/README.md deleted file mode 100644 index d3fd9c9e7..000000000 --- a/examples/text_ptb/README.md +++ /dev/null @@ -1 +0,0 @@ -### Language modeling on Penn Tree Bank (PTB) dataset \ No newline at end of file diff --git a/examples/text_ptb/tutorial_ptb_lstm.py b/examples/text_ptb/tutorial_ptb_lstm.py deleted file mode 100644 index 6f215abba..000000000 --- a/examples/text_ptb/tutorial_ptb_lstm.py +++ /dev/null @@ -1,523 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- -r"""Example of Synced sequence input and output. 
-
-This is a reimplementation of the TensorFlow official PTB example in:
-tensorflow/models/rnn/ptb
-
-The batch_size can be seen as the number of concurrent computations.\n
-As the following example shows, the first batch learns the sequence information by using 0 to 9.\n
-The second batch learns the sequence information by using 10 to 19.\n
-So it ignores the information from 9 to 10!\n
-Only if we set batch_size = 1 will it consider all the information from 0 to 20.\n
-
-The meaning of batch_size here is not the same as in the MNIST example. In the MNIST example,
-batch_size reflects how many examples we consider in each iteration, while in the
-PTB example, batch_size is how many concurrent processes (segments) are used
-to speed up computation.
-
-Some information will be ignored if batch_size > 1; however, if your dataset
-is "long" enough (a text corpus usually has billions of words), the ignored
-information will not affect the final result.
-
-In the PTB tutorial, we set batch_size = 20, so we cut the dataset into 20 segments.
-At the beginning of each epoch, we initialize (reset) the 20 RNN states for the 20
-segments, then go through the 20 segments separately.
-
-The training data will be generated as follows:\n
-
->>> train_data = [i for i in range(20)]
->>> for batch in tl.iterate.ptb_iterator(train_data, batch_size=2, num_steps=3):
->>>     x, y = batch
->>>     print(x, '\n', y)
-... [[ 0 1 2] <---x 1st subset/ iteration
-... [10 11 12]]
-... [[ 1 2 3] <---y
-... [11 12 13]]
-...
-... [[ 3 4 5] <--- 1st batch input, 2nd subset/ iteration
-... [13 14 15]] <--- 2nd batch input
-... [[ 4 5 6] <--- 1st batch target
-... [14 15 16]] <--- 2nd batch target
-...
-... [[ 6 7 8] 3rd subset/ iteration
-... [16 17 18]]
-... [[ 7 8 9]
-... [17 18 19]]
-
-Hao Dong: This example can also be considered as pre-training of the word
-embedding matrix.
-
-About RNN
----------
-$ Karpathy Blog : http://karpathy.github.io/2015/05/21/rnn-effectiveness/
-
-More TensorFlow official RNN examples can be found here
---------------------------------------------------------
-$ RNN for PTB : https://www.tensorflow.org/versions/master/tutorials/recurrent/index.html#recurrent-neural-networks
-$ Seq2seq : https://www.tensorflow.org/versions/master/tutorials/seq2seq/index.html#sequence-to-sequence-models
-$ translation : tensorflow/models/rnn/translate
-
-Example / benchmark for building a PTB LSTM model.
-
-Trains the model described in:
-(Zaremba et al.) Recurrent Neural Network Regularization
-http://arxiv.org/abs/1409.2329
-
-There are 3 supported model configurations:
-===========================================
-| config | epochs | train | valid  | test
-===========================================
-| small  | 13     | 37.99 | 121.39 | 115.91
-| medium | 39     | 48.45 | 86.16  | 82.07
-| large  | 55     | 37.87 | 82.62  | 78.29
-The exact results may vary depending on the random initialization.
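A self-contained numpy sketch of the batching scheme described above can reproduce the x/y pairs from the example; here `ptb_batches` is an illustrative stand-in, not the actual `tl.iterate.ptb_iterator` implementation:

import numpy as np

def ptb_batches(data, batch_size, num_steps):
    """Illustrative stand-in for tl.iterate.ptb_iterator (not the library code)."""
    data = np.asarray(data, dtype=np.int32)
    batch_len = len(data) // batch_size
    # Cut the corpus into `batch_size` concurrent segments.
    segments = data[:batch_size * batch_len].reshape(batch_size, batch_len)
    # Slide a window of `num_steps` over every segment; targets are inputs shifted by one.
    for i in range((batch_len - 1) // num_steps):
        x = segments[:, i * num_steps:(i + 1) * num_steps]
        y = segments[:, i * num_steps + 1:(i + 1) * num_steps + 1]
        yield x, y

for x, y in ptb_batches(list(range(20)), batch_size=2, num_steps=3):
    print(x, '\n', y)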
- -The hyperparameters used in the model: -- init_scale - the initial scale of the weights -- learning_rate - the initial value of the learning rate -- max_grad_norm - the maximum permissible norm of the gradient -- num_layers - the number of LSTM layers -- num_steps - the number of unrolled steps of LSTM -- hidden_size - the number of LSTM units -- max_epoch - the number of epochs trained with the initial learning rate -- max_max_epoch - the total number of epochs for training -- keep_prob - the probability of keeping weights in the dropout layer -- lr_decay - the decay of the learning rate for each epoch after "max_epoch" -- batch_size - the batch size - -The data required for this example is in the data/ dir of the -PTB dataset from Tomas Mikolov's webpage: - -$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz -$ tar xvf simple-examples.tgz - - -A) use the zero_state function on the cell object - -B) for an rnn, all time steps share weights. We use one matrix to keep all -gate weights. Split by column into 4 parts to get the 4 gate weight matrices. - -""" -import argparse -import sys -import time - -import numpy as np -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer.models import Model - -tl.logging.set_verbosity(tl.logging.DEBUG) - - -def process_args(args): - parser = argparse.ArgumentParser() - - parser.add_argument( - '--model', default='small', choices=['small', 'medium', 'large'], - help="A type of model. Possible options are: small, medium, large." - ) - parameters = parser.parse_args(args) - return parameters - - -class PTB_Net(Model): - - def __init__(self, vocab_size, hidden_size, init, keep): - super(PTB_Net, self).__init__() - - self.embedding = tl.layers.Embedding(vocab_size, hidden_size, init) - self.dropout1 = tl.layers.Dropout(keep=keep) - self.lstm1 = tl.layers.RNN( - cell=tf.keras.layers.LSTMCell(hidden_size), return_last_output=False, return_last_state=True, - return_seq_2d=False, in_channels=hidden_size - ) - self.dropout2 = tl.layers.Dropout(keep=keep) - self.lstm2 = tl.layers.RNN( - cell=tf.keras.layers.LSTMCell(hidden_size), return_last_output=False, return_last_state=True, - return_seq_2d=True, in_channels=hidden_size - ) - self.dropout3 = tl.layers.Dropout(keep=keep) - self.out_dense = tl.layers.Dense(vocab_size, in_channels=hidden_size, W_init=init, b_init=init, act=None) - - def forward(self, inputs, lstm1_initial_state=None, lstm2_initial_state=None): - inputs = self.embedding(inputs) - inputs = self.dropout1(inputs) - lstm1_out, lstm1_state = self.lstm1(inputs, initial_state=lstm1_initial_state) - inputs = self.dropout2(lstm1_out) - lstm2_out, lstm2_state = self.lstm2(inputs, initial_state=lstm2_initial_state) - inputs = self.dropout3(lstm2_out) - logits = self.out_dense(inputs) - return logits, lstm1_state, lstm2_state - - -def main(): - """ - The core of the model consists of an LSTM cell that processes one word at - a time and computes probabilities of the possible continuations of the - sentence. The memory state of the network is initialized with a vector - of zeros and gets updated after reading each word. Also, for computational - reasons, we will process data in mini-batches of size batch_size. 
- - """ - param = process_args(sys.argv[1:]) - - if param.model == "small": - init_scale = 0.1 - learning_rate = 1e-3 - max_grad_norm = 5 - num_steps = 20 - hidden_size = 200 - max_epoch = 4 - max_max_epoch = 13 - keep_prob = 1.0 - lr_decay = 0.5 - batch_size = 20 - vocab_size = 10000 - elif param.model == "medium": - init_scale = 0.05 - learning_rate = 1e-3 - max_grad_norm = 5 - # num_layers = 2 - num_steps = 35 - hidden_size = 650 - max_epoch = 6 - max_max_epoch = 39 - keep_prob = 0.5 - lr_decay = 0.8 - batch_size = 20 - vocab_size = 10000 - elif param.model == "large": - init_scale = 0.04 - learning_rate = 1e-3 - max_grad_norm = 10 - # num_layers = 2 - num_steps = 35 - hidden_size = 1500 - max_epoch = 14 - max_max_epoch = 55 - keep_prob = 0.35 - lr_decay = 1 / 1.15 - batch_size = 20 - vocab_size = 10000 - else: - raise ValueError("Invalid model: %s", param.model) - - # Load PTB dataset - train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset() - # train_data = train_data[0:int(100000/5)] # for fast testing - print('len(train_data) {}'.format(len(train_data))) # 929589 a list of int - print('len(valid_data) {}'.format(len(valid_data))) # 73760 a list of int - print('len(test_data) {}'.format(len(test_data))) # 82430 a list of int - print('vocab_size {}'.format(vocab_size)) # 10000 - - # One int represents one word, the meaning of batch_size here is not the - # same with MNIST example, it is the number of concurrent processes for - # computational reasons. - - init = tf.random_uniform_initializer(-init_scale, init_scale) - net = PTB_Net(hidden_size=hidden_size, vocab_size=vocab_size, init=init, keep=keep_prob) - - # Truncated Backpropagation for training - lr = tf.Variable(0.0, trainable=False) - train_weights = net.weights - optimizer = tf.optimizers.Adam(lr=lr) - - print(net) - - print("\nStart learning a language model by using PTB dataset") - for i in range(max_max_epoch): - # decreases the initial learning rate after several - # epoachs (defined by ``max_epoch``), by multipling a ``lr_decay``. 
-        new_lr_decay = lr_decay**max(i - max_epoch, 0.0)
-        lr.assign(learning_rate * new_lr_decay)
-
-        # Training
-        net.train()
-        print("Epoch: %d/%d Learning rate: %.3f" % (i + 1, max_max_epoch, lr.value()))
-        epoch_size = ((len(train_data) // batch_size) - 1) // num_steps
-        start_time = time.time()
-        costs = 0.0
-        iters = 0
-        # reset all states at the beginning of every epoch
-        lstm1_state = None
-        lstm2_state = None
-
-        for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, num_steps)):
-
-            with tf.GradientTape() as tape:
-                ## compute outputs
-                logits, lstm1_state, lstm2_state = net(
-                    x, lstm1_initial_state=lstm1_state, lstm2_initial_state=lstm2_state
-                )
-                ## compute loss and update model
-                cost = tl.cost.cross_entropy(logits, tf.reshape(y, [-1]), name='train_loss')
-
-            grad, _ = tf.clip_by_global_norm(tape.gradient(cost, train_weights), max_grad_norm)
-            optimizer.apply_gradients(zip(grad, train_weights))
-
-            costs += cost
-            iters += 1
-
-            if step % (epoch_size // 10) == 10:
-                print(
-                    "%.3f perplexity: %.3f speed: %.0f wps" % (
-                        step * 1.0 / epoch_size, np.exp(costs / iters), iters * batch_size * num_steps /
-                        (time.time() - start_time)
-                    )
-                )
-        train_perplexity = np.exp(costs / iters)
-        print("Epoch: %d/%d Train Perplexity: %.3f" % (i + 1, max_max_epoch, train_perplexity))
-
-        # Validation
-        net.eval()
-        start_time = time.time()
-        costs = 0.0
-        iters = 0
-        # reset all states at the beginning of every epoch
-        lstm1_state = None
-        lstm2_state = None
-        for step, (x, y) in enumerate(tl.iterate.ptb_iterator(valid_data, batch_size, num_steps)):
-            ## compute outputs
-            logits, lstm1_state, lstm2_state = net(x, lstm1_initial_state=lstm1_state, lstm2_initial_state=lstm2_state)
-            ## compute loss (no update during validation)
-            cost = tl.cost.cross_entropy(logits, tf.reshape(y, [-1]), name='train_loss')
-            costs += cost
-            iters += 1
-        valid_perplexity = np.exp(costs / iters)
-        print("Epoch: %d/%d Valid Perplexity: %.3f" % (i + 1, max_max_epoch, valid_perplexity))
-
-    print("Evaluation")
-    # Testing
-    net.eval()
-    # go through the test set step by step; it will take a while
- start_time = time.time() - costs = 0.0 - iters = 0 - # reset all states at the begining - lstm1_state = None - lstm2_state = None - for step, (x, y) in enumerate(tl.iterate.ptb_iterator(test_data, batch_size=1, num_steps=1)): - ## compute outputs - logits, lstm1_state, lstm2_state = net(x, lstm1_initial_state=lstm1_state, lstm2_initial_state=lstm2_state) - ## compute loss and update model - cost = tl.cost.cross_entropy(logits, tf.reshape(y, [-1]), name='train_loss') - costs += cost - iters += 1 - test_perplexity = np.exp(costs / iters) - print("Test Perplexity: %.3f took %.2fs" % (test_perplexity, time.time() - start_time)) - - print( - "More example: Text generation using Trump's speech data: https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_generate_text.py -- def main_lstm_generate_text():" - ) - - -if __name__ == "__main__": - main() - -# log of SmallConfig -# Start learning a language model by using PTB dataset -# Epoch: 1 Learning rate: 1.000 -# 0.004 perplexity: 5512.735 speed: 4555 wps -# 0.104 perplexity: 841.289 speed: 8823 wps -# 0.204 perplexity: 626.273 speed: 9292 wps -# 0.304 perplexity: 505.628 speed: 9472 wps -# 0.404 perplexity: 435.580 speed: 9551 wps -# 0.504 perplexity: 390.108 speed: 9555 wps -# 0.604 perplexity: 351.379 speed: 9546 wps -# 0.703 perplexity: 324.846 speed: 9579 wps -# 0.803 perplexity: 303.824 speed: 9574 wps -# 0.903 perplexity: 284.468 speed: 9551 wps -# Epoch: 1 Train Perplexity: 269.981 -# Epoch: 1 Valid Perplexity: 178.561 -# Epoch: 2 Learning rate: 1.000 -# 0.004 perplexity: 211.632 speed: 7697 wps -# 0.104 perplexity: 151.509 speed: 9488 wps -# 0.204 perplexity: 158.947 speed: 9674 wps -# 0.304 perplexity: 153.963 speed: 9806 wps -# 0.404 perplexity: 150.938 speed: 9817 wps -# 0.504 perplexity: 148.413 speed: 9824 wps -# 0.604 perplexity: 143.763 speed: 9765 wps -# 0.703 perplexity: 141.616 speed: 9731 wps -# 0.803 perplexity: 139.618 speed: 9781 wps -# 0.903 perplexity: 135.880 speed: 9735 wps -# Epoch: 2 Train Perplexity: 133.771 -# Epoch: 2 Valid Perplexity: 142.595 -# Epoch: 3 Learning rate: 1.000 -# 0.004 perplexity: 146.902 speed: 8345 wps -# 0.104 perplexity: 105.647 speed: 9572 wps -# 0.204 perplexity: 114.261 speed: 9585 wps -# 0.304 perplexity: 111.237 speed: 9586 wps -# 0.404 perplexity: 110.181 speed: 9605 wps -# 0.504 perplexity: 109.383 speed: 9601 wps -# 0.604 perplexity: 106.722 speed: 9635 wps -# 0.703 perplexity: 106.075 speed: 9597 wps -# 0.803 perplexity: 105.481 speed: 9624 wps -# 0.903 perplexity: 103.262 speed: 9618 wps -# Epoch: 3 Train Perplexity: 102.272 -# Epoch: 3 Valid Perplexity: 131.884 -# Epoch: 4 Learning rate: 1.000 -# 0.004 perplexity: 118.127 speed: 7867 wps -# 0.104 perplexity: 85.530 speed: 9330 wps -# 0.204 perplexity: 93.559 speed: 9399 wps -# 0.304 perplexity: 91.141 speed: 9386 wps -# 0.404 perplexity: 90.668 speed: 9462 wps -# 0.504 perplexity: 90.366 speed: 9516 wps -# 0.604 perplexity: 88.479 speed: 9477 wps -# 0.703 perplexity: 88.275 speed: 9533 wps -# 0.803 perplexity: 88.091 speed: 9560 wps -# 0.903 perplexity: 86.430 speed: 9516 wps -# Epoch: 4 Train Perplexity: 85.839 -# Epoch: 4 Valid Perplexity: 128.408 -# Epoch: 5 Learning rate: 1.000 -# 0.004 perplexity: 100.077 speed: 7682 wps -# 0.104 perplexity: 73.856 speed: 9197 wps -# 0.204 perplexity: 81.242 speed: 9266 wps -# 0.304 perplexity: 79.315 speed: 9375 wps -# 0.404 perplexity: 79.009 speed: 9439 wps -# 0.504 perplexity: 78.874 speed: 9377 wps -# 0.604 perplexity: 77.430 speed: 9436 wps -# 0.703 perplexity: 77.415 
speed: 9417 wps -# 0.803 perplexity: 77.424 speed: 9407 wps -# 0.903 perplexity: 76.083 speed: 9407 wps -# Epoch: 5 Train Perplexity: 75.719 -# Epoch: 5 Valid Perplexity: 127.057 -# Epoch: 6 Learning rate: 0.500 -# 0.004 perplexity: 87.561 speed: 7130 wps -# 0.104 perplexity: 64.202 speed: 9753 wps -# 0.204 perplexity: 69.518 speed: 9537 wps -# 0.304 perplexity: 66.868 speed: 9647 wps -# 0.404 perplexity: 65.766 speed: 9538 wps -# 0.504 perplexity: 64.967 speed: 9537 wps -# 0.604 perplexity: 63.090 speed: 9565 wps -# 0.703 perplexity: 62.415 speed: 9544 wps -# 0.803 perplexity: 61.751 speed: 9504 wps -# 0.903 perplexity: 60.027 speed: 9482 wps -# Epoch: 6 Train Perplexity: 59.127 -# Epoch: 6 Valid Perplexity: 120.339 -# Epoch: 7 Learning rate: 0.250 -# 0.004 perplexity: 72.069 speed: 7683 wps -# 0.104 perplexity: 53.331 speed: 9526 wps -# 0.204 perplexity: 57.897 speed: 9572 wps -# 0.304 perplexity: 55.557 speed: 9491 wps -# 0.404 perplexity: 54.597 speed: 9483 wps -# 0.504 perplexity: 53.817 speed: 9471 wps -# 0.604 perplexity: 52.147 speed: 9511 wps -# 0.703 perplexity: 51.473 speed: 9497 wps -# 0.803 perplexity: 50.788 speed: 9521 wps -# 0.903 perplexity: 49.203 speed: 9515 wps -# Epoch: 7 Train Perplexity: 48.303 -# Epoch: 7 Valid Perplexity: 120.782 -# Epoch: 8 Learning rate: 0.125 -# 0.004 perplexity: 63.503 speed: 8425 wps -# 0.104 perplexity: 47.324 speed: 9433 wps -# 0.204 perplexity: 51.525 speed: 9653 wps -# 0.304 perplexity: 49.405 speed: 9520 wps -# 0.404 perplexity: 48.532 speed: 9487 wps -# 0.504 perplexity: 47.800 speed: 9610 wps -# 0.604 perplexity: 46.282 speed: 9554 wps -# 0.703 perplexity: 45.637 speed: 9536 wps -# 0.803 perplexity: 44.972 speed: 9493 wps -# 0.903 perplexity: 43.506 speed: 9496 wps -# Epoch: 8 Train Perplexity: 42.653 -# Epoch: 8 Valid Perplexity: 122.119 -# Epoch: 9 Learning rate: 0.062 -# 0.004 perplexity: 59.375 speed: 7158 wps -# 0.104 perplexity: 44.223 speed: 9275 wps -# 0.204 perplexity: 48.269 speed: 9459 wps -# 0.304 perplexity: 46.273 speed: 9564 wps -# 0.404 perplexity: 45.450 speed: 9604 wps -# 0.504 perplexity: 44.749 speed: 9604 wps -# 0.604 perplexity: 43.308 speed: 9619 wps -# 0.703 perplexity: 42.685 speed: 9647 wps -# 0.803 perplexity: 42.022 speed: 9673 wps -# 0.903 perplexity: 40.616 speed: 9678 wps -# Epoch: 9 Train Perplexity: 39.792 -# Epoch: 9 Valid Perplexity: 123.170 -# Epoch: 10 Learning rate: 0.031 -# 0.004 perplexity: 57.333 speed: 7183 wps -# 0.104 perplexity: 42.631 speed: 9592 wps -# 0.204 perplexity: 46.580 speed: 9518 wps -# 0.304 perplexity: 44.625 speed: 9569 wps -# 0.404 perplexity: 43.832 speed: 9576 wps -# 0.504 perplexity: 43.153 speed: 9571 wps -# 0.604 perplexity: 41.761 speed: 9557 wps -# 0.703 perplexity: 41.159 speed: 9524 wps -# 0.803 perplexity: 40.494 speed: 9527 wps -# 0.903 perplexity: 39.111 speed: 9558 wps -# Epoch: 10 Train Perplexity: 38.298 -# Epoch: 10 Valid Perplexity: 123.658 -# Epoch: 11 Learning rate: 0.016 -# 0.004 perplexity: 56.238 speed: 7190 wps -# 0.104 perplexity: 41.771 speed: 9171 wps -# 0.204 perplexity: 45.656 speed: 9415 wps -# 0.304 perplexity: 43.719 speed: 9472 wps -# 0.404 perplexity: 42.941 speed: 9483 wps -# 0.504 perplexity: 42.269 speed: 9494 wps -# 0.604 perplexity: 40.903 speed: 9530 wps -# 0.703 perplexity: 40.314 speed: 9545 wps -# 0.803 perplexity: 39.654 speed: 9580 wps -# 0.903 perplexity: 38.287 speed: 9597 wps -# Epoch: 11 Train Perplexity: 37.477 -# Epoch: 11 Valid Perplexity: 123.523 -# Epoch: 12 Learning rate: 0.008 -# 0.004 perplexity: 55.552 speed: 7317 wps -# 
0.104 perplexity: 41.267 speed: 9234 wps -# 0.204 perplexity: 45.119 speed: 9461 wps -# 0.304 perplexity: 43.204 speed: 9519 wps -# 0.404 perplexity: 42.441 speed: 9453 wps -# 0.504 perplexity: 41.773 speed: 9536 wps -# 0.604 perplexity: 40.423 speed: 9555 wps -# 0.703 perplexity: 39.836 speed: 9576 wps -# 0.803 perplexity: 39.181 speed: 9579 wps -# 0.903 perplexity: 37.827 speed: 9554 wps -# Epoch: 12 Train Perplexity: 37.020 -# Epoch: 12 Valid Perplexity: 123.192 -# Epoch: 13 Learning rate: 0.004 -# 0.004 perplexity: 55.124 speed: 8234 wps -# 0.104 perplexity: 40.970 speed: 9391 wps -# 0.204 perplexity: 44.804 speed: 9525 wps -# 0.304 perplexity: 42.912 speed: 9512 wps -# 0.404 perplexity: 42.162 speed: 9536 wps -# 0.504 perplexity: 41.500 speed: 9630 wps -# 0.604 perplexity: 40.159 speed: 9591 wps -# 0.703 perplexity: 39.574 speed: 9575 wps -# 0.803 perplexity: 38.921 speed: 9613 wps -# 0.903 perplexity: 37.575 speed: 9629 wps -# Epoch: 13 Train Perplexity: 36.771 -# Epoch: 13 Valid Perplexity: 122.917 -# Evaluation -# Test Perplexity: 116.723 took 124.06s - -# MediumConfig -# Epoch: 1 Learning rate: 1.000 -# 0.008 perplexity: 5173.547 speed: 6469 wps -# 0.107 perplexity: 1219.527 speed: 6453 wps -# 0.206 perplexity: 866.163 speed: 6441 wps -# 0.306 perplexity: 695.163 speed: 6428 wps -# 0.405 perplexity: 598.464 speed: 6420 wps -# 0.505 perplexity: 531.875 speed: 6422 wps -# 0.604 perplexity: 477.079 speed: 6425 wps -# 0.704 perplexity: 438.297 speed: 6428 wps -# 0.803 perplexity: 407.928 speed: 6425 wps -# 0.903 perplexity: 381.264 speed: 6429 wps -# Epoch: 1 Train Perplexity: 360.795 -# Epoch: 1 Valid Perplexity: 208.854 -# ... -# Epoch: 39 Learning rate: 0.001 -# 0.008 perplexity: 56.618 speed: 6357 wps -# 0.107 perplexity: 43.375 speed: 6341 wps -# 0.206 perplexity: 47.873 speed: 6336 wps -# 0.306 perplexity: 46.408 speed: 6337 wps -# 0.405 perplexity: 46.327 speed: 6337 wps -# 0.505 perplexity: 46.115 speed: 6335 wps -# 0.604 perplexity: 45.323 speed: 6336 wps -# 0.704 perplexity: 45.286 speed: 6337 wps -# 0.803 perplexity: 45.174 speed: 6336 wps -# 0.903 perplexity: 44.334 speed: 6336 wps -# Epoch: 39 Train Perplexity: 44.021 -# Epoch: 39 Valid Perplexity: 87.516 -# Evaluation -# Test Perplexity: 83.858 took 167.58s diff --git a/examples/text_ptb/tutorial_ptb_lstm_state_is_tuple.py b/examples/text_ptb/tutorial_ptb_lstm_state_is_tuple.py deleted file mode 100644 index 0021a7bfc..000000000 --- a/examples/text_ptb/tutorial_ptb_lstm_state_is_tuple.py +++ /dev/null @@ -1,618 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- -"""Example of Synced sequence input and output. - -This is a reimpmentation of the TensorFlow official PTB example in : -tensorflow/models/rnn/ptb - -The batch_size can be seem as how many concurrent computations.n -As the following example shows, the first batch learn the sequence information by using 0 to 9.n -The second batch learn the sequence information by using 10 to 19.n -So it ignores the information from 9 to 10 !n -If only if we set the batch_size = 1, it will consider all information from 0 to 20.n - -The meaning of batch_size here is not the same with the MNIST example. In MNIST example, -batch_size reflects how many examples we consider in each iteration, while in -PTB example, batch_size is how many concurrent processes (segments) -for speed up computation. 
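As a side note on the numbers reported in the SmallConfig/MediumConfig logs above: perplexity is simply the exponential of the average per-step cross-entropy, accumulated exactly as in the training loops. A minimal bookkeeping sketch, with made-up stand-in loss values:

import numpy as np

costs, iters = 0.0, 0
for step_loss in (4.8, 4.7, 4.9, 4.6):  # stand-in per-step cross-entropy values
    costs += step_loss
    iters += 1
# perplexity = exp(mean cross-entropy); here exp(4.75) ~= 115.584
print("perplexity: %.3f" % np.exp(costs / iters))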
diff --git a/examples/text_ptb/tutorial_ptb_lstm_state_is_tuple.py b/examples/text_ptb/tutorial_ptb_lstm_state_is_tuple.py
deleted file mode 100644
index 0021a7bfc..000000000
--- a/examples/text_ptb/tutorial_ptb_lstm_state_is_tuple.py
+++ /dev/null
@@ -1,618 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""Example of Synced sequence input and output.
-
-This is a reimplementation of the TensorFlow official PTB example in:
-tensorflow/models/rnn/ptb
-
-The batch_size can be seen as the number of concurrent computations.
-As the following example shows, the first batch learns the sequence information by using words 0 to 9.
-The second batch learns the sequence information by using words 10 to 19.
-So it ignores the information from words 9 to 10!
-Only if we set batch_size = 1 will it consider all the information from words 0 to 20.
-
-The meaning of batch_size here is not the same as in the MNIST example. In the MNIST example,
-batch_size reflects how many examples we consider in each iteration, while in the
-PTB example, batch_size is how many concurrent processes (segments)
-we run to speed up the computation.
-
-Some information will be ignored if batch_size > 1; however, if your dataset
-is "long" enough (a text corpus usually has billions of words), the ignored
-information will not affect the final result.
-
-In the PTB tutorial, we set batch_size = 20, so we cut the dataset into 20 segments.
-At the beginning of each epoch, we initialize (reset) the 20 RNN states for the 20
-segments, then go through the 20 segments separately.
-
-The training data will be generated as follows:
-
->>> train_data = [i for i in range(20)]
->>> for batch in tl.iterate.ptb_iterator(train_data, batch_size=2, num_steps=3):
->>>     x, y = batch
->>>     print(x, '\n', y)
-... [[ 0  1  2] <---x 1st subset/ iteration
-...  [10 11 12]]
-... [[ 1  2  3] <---y
-...  [11 12 13]]
-...
-... [[ 3  4  5] <--- 1st batch input 2nd subset/ iteration
-...  [13 14 15]] <--- 2nd batch input
-... [[ 4  5  6] <--- 1st batch target
-...  [14 15 16]] <--- 2nd batch target
-...
-... [[ 6  7  8] 3rd subset/ iteration
-...  [16 17 18]]
-... [[ 7  8  9]
-...  [17 18 19]]
-
-Hao Dong: This example can also be considered as pre-training of the word
-embedding matrix.
-
-About RNN
-----------
-$ Karpathy Blog : http://karpathy.github.io/2015/05/21/rnn-effectiveness/
-
-More TensorFlow official RNN examples can be found here
---------------------------------------------------------
-$ RNN for PTB : https://www.tensorflow.org/versions/master/tutorials/recurrent/index.html#recurrent-neural-networks
-$ Seq2seq : https://www.tensorflow.org/versions/master/tutorials/seq2seq/index.html#sequence-to-sequence-models
-$ translation : tensorflow/models/rnn/translate
-
-tensorflow (0.9.0)
-
-Example / benchmark for building a PTB LSTM model.
-
-Trains the model described in:
-(Zaremba, et al.) Recurrent Neural Network Regularization
-http://arxiv.org/abs/1409.2329
-
-There are 3 supported model configurations:
-===========================================
-| config | epochs | train | valid  | test
-===========================================
-| small  | 13     | 37.99 | 121.39 | 115.91
-| medium | 39     | 48.45 | 86.16  | 82.07
-| large  | 55     | 37.87 | 82.62  | 78.29
-The exact results may vary depending on the random initialization.
-
-The hyperparameters used in the model:
-- init_scale - the initial scale of the weights
-- learning_rate - the initial value of the learning rate
-- max_grad_norm - the maximum permissible norm of the gradient
-- num_layers - the number of LSTM layers
-- num_steps - the number of unrolled steps of LSTM
-- hidden_size - the number of LSTM units
-- max_epoch - the number of epochs trained with the initial learning rate
-- max_max_epoch - the total number of epochs for training
-- keep_prob - the probability of keeping weights in the dropout layer
-- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
-- batch_size - the batch size
-
-The data required for this example is in the data/ dir of the
-PTB dataset from Tomas Mikolov's webpage:
-
-$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
-$ tar xvf simple-examples.tgz
-
-A) use the zero_state function on the cell object
-
-B) for an rnn, all time steps share weights. We use one matrix to keep all
-gate weights. Split by column into 4 parts to get the 4 gate weight matrices.
-
-"""
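The segment-cutting behaviour described in this docstring is compact enough to sketch directly. The helper below is a hypothetical stand-in for tl.iterate.ptb_iterator (illustration only, not the library code), showing how batch_size concurrent segments and num_steps-wide windows interact; run on range(20) it reproduces the doctest output above.

import numpy as np

def ptb_style_iterator(data, batch_size, num_steps):
    # Cut the corpus into batch_size contiguous segments, one per row.
    data = np.asarray(data, dtype=np.int32)
    segment_len = len(data) // batch_size
    segments = data[:batch_size * segment_len].reshape(batch_size, segment_len)
    # Slide a num_steps window along all segments in parallel; targets
    # are the inputs shifted one word to the right.
    for i in range((segment_len - 1) // num_steps):
        x = segments[:, i * num_steps:(i + 1) * num_steps]
        y = segments[:, i * num_steps + 1:(i + 1) * num_steps + 1]
        yield x, y

for x, y in ptb_style_iterator(list(range(20)), batch_size=2, num_steps=3):
    print(x, '\n', y)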
-
-import sys
-import time
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-
-tf.logging.set_verbosity(tf.logging.DEBUG)
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-flags = tf.app.flags
-
-flags.DEFINE_string("model", "small", "A type of model. Possible options are: small, medium, large.")
-
-if (tf.VERSION >= '1.5'):
-    # parse flags
-    flags.FLAGS(sys.argv, known_only=True)
-    flags.ArgumentParser()
-
-FLAGS = flags.FLAGS
-
-tf.logging.set_verbosity(tf.logging.DEBUG)
-
-
-def main(_):
-    """
-    The core of the model consists of an LSTM cell that processes one word at
-    a time and computes probabilities of the possible continuations of the
-    sentence. The memory state of the network is initialized with a vector
-    of zeros and gets updated after reading each word. Also, for computational
-    reasons, we will process data in mini-batches of size batch_size.
-    """
-    if FLAGS.model == "small":
-        init_scale = 0.1
-        learning_rate = 1.
-        max_grad_norm = 5
-        num_steps = 20
-        hidden_size = 200
-        max_epoch = 4
-        max_max_epoch = 13
-        keep_prob = 1.0
-        lr_decay = 0.5
-        batch_size = 20
-        vocab_size = 10000
-    elif FLAGS.model == "medium":
-        init_scale = 0.05
-        learning_rate = 1.0
-        max_grad_norm = 5
-        # num_layers = 2
-        num_steps = 35
-        hidden_size = 650
-        max_epoch = 6
-        max_max_epoch = 39
-        keep_prob = 0.5
-        lr_decay = 0.8
-        batch_size = 20
-        vocab_size = 10000
-    elif FLAGS.model == "large":
-        init_scale = 0.04
-        learning_rate = 1.0
-        max_grad_norm = 10
-        # num_layers = 2
-        num_steps = 35
-        hidden_size = 1500
-        max_epoch = 14
-        max_max_epoch = 55
-        keep_prob = 0.35
-        lr_decay = 1 / 1.15
-        batch_size = 20
-        vocab_size = 10000
-    else:
-        raise ValueError("Invalid model: %s" % FLAGS.model)
-
-    # Load PTB dataset
-    train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset()
-    # train_data = train_data[0:int(100000/5)]  # for fast testing
-    print('len(train_data) {}'.format(len(train_data)))  # 929589 a list of int
-    print('len(valid_data) {}'.format(len(valid_data)))  # 73760 a list of int
-    print('len(test_data) {}'.format(len(test_data)))  # 82430 a list of int
-    print('vocab_size {}'.format(vocab_size))  # 10000
-
-    sess = tf.InteractiveSession()
-
-    # One int represents one word; the meaning of batch_size here is not the
-    # same as in the MNIST example, it is the number of concurrent processes
-    # used for computational reasons.
-
-    # Training and Validation
-    input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
-    targets = tf.placeholder(tf.int32, [batch_size, num_steps])
-    # Testing (Evaluation)
-    input_data_test = tf.placeholder(tf.int32, [1, 1])
-    targets_test = tf.placeholder(tf.int32, [1, 1])
-
-    def inference(x, is_training, num_steps, reuse=None):
-        """If reuse is True, the inference graph reuses the existing parameters,
-        so the different inference graphs share the same parameters.
-
-        Note :
-        - For DynamicRNNLayer, you can set the dropout and the number of RNN layers internally.
- """ - print("\nnum_steps : %d, is_training : %s, reuse : %s" % (num_steps, is_training, reuse)) - init = tf.random_uniform_initializer(-init_scale, init_scale) - with tf.variable_scope("model", reuse=reuse): - net = tl.layers.EmbeddingInputlayer(x, vocab_size, hidden_size, init, name='embedding') - net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_training, name='drop1') - net = tl.layers.RNNLayer( - net, - cell_fn=tf.contrib.rnn.BasicLSTMCell, # tf.nn.rnn_cell.BasicLSTMCell, - cell_init_args={ - 'forget_bias': 0.0, - 'state_is_tuple': True - }, - n_hidden=hidden_size, - initializer=init, - n_steps=num_steps, - return_last=False, - name='basic_lstm1' - ) - lstm1 = net - net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_training, name='drop2') - net = tl.layers.RNNLayer( - net, - cell_fn=tf.contrib.rnn.BasicLSTMCell, # tf.nn.rnn_cell.BasicLSTMCell, - cell_init_args={ - 'forget_bias': 0.0, - 'state_is_tuple': True - }, - n_hidden=hidden_size, - initializer=init, - n_steps=num_steps, - return_last=False, - return_seq_2d=True, - name='basic_lstm2' - ) - lstm2 = net - # Alternatively, if return_seq_2d=False, in the above RNN layer, - # you can reshape the outputs as follow: - # net = tl.layers.ReshapeLayer(net, - # shape=[-1, int(net.outputs._shape[-1])], name='reshape') - net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_training, name='drop3') - net = tl.layers.DenseLayer(net, vocab_size, W_init=init, b_init=init, act=None, name='output') - return net, lstm1, lstm2 - - # Inference for Training - net, lstm1, lstm2 = inference(input_data, is_training=True, num_steps=num_steps, reuse=None) - # Inference for Validating - net_val, lstm1_val, lstm2_val = inference(input_data, is_training=False, num_steps=num_steps, reuse=True) - # Inference for Testing (Evaluation) - net_test, lstm1_test, lstm2_test = inference(input_data_test, is_training=False, num_steps=1, reuse=True) - - # sess.run(tf.global_variables_initializer()) - sess.run(tf.global_variables_initializer()) - - def loss_fn(outputs, targets, batch_size): - # See tl.cost.cross_entropy_seq() - # Returns the cost function of Cross-entropy of two sequences, implement - # softmax internally. - # outputs : 2D tensor [batch_size*num_steps, n_units of output layer] - # targets : 2D tensor [batch_size, num_steps], need to be reshaped. - # batch_size : RNN batch_size, number of concurrent processes. - # n_examples = batch_size * num_steps - # so - # cost is the averaged cost of each mini-batch (concurrent process). 
-
-    # Inference for Training
-    net, lstm1, lstm2 = inference(input_data, is_training=True, num_steps=num_steps, reuse=None)
-    # Inference for Validating
-    net_val, lstm1_val, lstm2_val = inference(input_data, is_training=False, num_steps=num_steps, reuse=True)
-    # Inference for Testing (Evaluation)
-    net_test, lstm1_test, lstm2_test = inference(input_data_test, is_training=False, num_steps=1, reuse=True)
-
-    # sess.run(tf.global_variables_initializer())
-    sess.run(tf.global_variables_initializer())
-
-    def loss_fn(outputs, targets, batch_size):
-        # See tl.cost.cross_entropy_seq()
-        # Returns the cost function of cross-entropy of two sequences; it implements
-        # softmax internally.
-        # outputs : 2D tensor [batch_size*num_steps, n_units of output layer]
-        # targets : 2D tensor [batch_size, num_steps], needs to be reshaped.
-        # batch_size : RNN batch_size, number of concurrent processes.
-        # n_examples = batch_size * num_steps
-        # so
-        # cost is the averaged cost of each mini-batch (concurrent process).
-        loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
-            [outputs], [tf.reshape(targets, [-1])], [tf.ones_like(tf.reshape(targets, [-1]), dtype=tf.float32)]
-        )
-        # [tf.ones([batch_size * num_steps])])
-        cost = tf.reduce_sum(loss) / batch_size
-        return cost
-
-    # Cost for Training
-    cost = loss_fn(net.outputs, targets, batch_size)
-    # Cost for Validating
-    cost_val = loss_fn(net_val.outputs, targets, batch_size)
-    # Cost for Testing (Evaluation)
-    cost_test = loss_fn(net_test.outputs, targets_test, 1)
-
-    # Truncated Backpropagation for training
-    with tf.variable_scope('learning_rate'):
-        lr = tf.Variable(0.0, trainable=False)
-    tvars = tf.trainable_variables()
-    grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), max_grad_norm)
-    optimizer = tf.train.GradientDescentOptimizer(lr)
-    train_op = optimizer.apply_gradients(zip(grads, tvars))
-
-    # sess.run(tf.global_variables_initializer())
-    sess.run(tf.global_variables_initializer())
-
-    net.print_params()
-    net.print_layers()
-    tl.layers.print_all_variables()
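The decay schedule the training loop below applies with tf.assign is easy to verify on its own. This few-line check (not part of the tutorial) reproduces the "Learning rate" lines of the SmallConfig log at the end of the file:

# SmallConfig: lr stays at its initial value for max_epoch epochs,
# then decays geometrically by lr_decay each epoch.
learning_rate, lr_decay, max_epoch = 1.0, 0.5, 4
for i in range(13):
    print("Epoch: %d Learning rate: %.3f" % (i + 1, learning_rate * lr_decay ** max(i - max_epoch, 0)))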
-
-    print("\nStart learning a language model by using PTB dataset")
-    for i in range(max_max_epoch):
-        # decrease the initial learning rate after several
-        # epochs (defined by ``max_epoch``), by multiplying by ``lr_decay``.
-        new_lr_decay = lr_decay**max(i - max_epoch, 0.0)
-        sess.run(tf.assign(lr, learning_rate * new_lr_decay))
-
-        # Training
-        print("Epoch: %d/%d Learning rate: %.3f" % (i + 1, max_max_epoch, sess.run(lr)))
-        epoch_size = ((len(train_data) // batch_size) - 1) // num_steps
-        start_time = time.time()
-        costs = 0.0
-        iters = 0
-        # reset all states at the beginning of every epoch
-        state1 = tl.layers.initialize_rnn_state(lstm1.initial_state)
-        state2 = tl.layers.initialize_rnn_state(lstm2.initial_state)
-        for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, num_steps)):
-            feed_dict = {
-                input_data: x,
-                targets: y,
-                lstm1.initial_state.c: state1[0],
-                lstm1.initial_state.h: state1[1],
-                lstm2.initial_state.c: state2[0],
-                lstm2.initial_state.h: state2[1],
-            }
-            # For training, enable dropout
-            feed_dict.update(net.all_drop)
-            _cost, state1_c, state1_h, state2_c, state2_h, _ = sess.run(
-                [cost, lstm1.final_state.c, lstm1.final_state.h, lstm2.final_state.c, lstm2.final_state.h, train_op],
-                feed_dict=feed_dict
-            )
-            state1 = (state1_c, state1_h)
-            state2 = (state2_c, state2_h)
-
-            costs += _cost
-            iters += num_steps
-
-            if step % (epoch_size // 10) == 10:
-                print(
-                    "%.3f perplexity: %.3f speed: %.0f wps" %
-                    (step * 1.0 / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time))
-                )
-        train_perplexity = np.exp(costs / iters)
-        print("Epoch: %d/%d Train Perplexity: %.3f" % (i + 1, max_max_epoch, train_perplexity))
-
-        # Validation
-        start_time = time.time()
-        costs = 0.0
-        iters = 0
-        # reset all states at the beginning of every epoch
-        state1 = tl.layers.initialize_rnn_state(lstm1_val.initial_state)
-        state2 = tl.layers.initialize_rnn_state(lstm2_val.initial_state)
-        for step, (x, y) in enumerate(tl.iterate.ptb_iterator(valid_data, batch_size, num_steps)):
-            feed_dict = {
-                input_data: x,
-                targets: y,
-                lstm1_val.initial_state.c: state1[0],
-                lstm1_val.initial_state.h: state1[1],
-                lstm2_val.initial_state.c: state2[0],
-                lstm2_val.initial_state.h: state2[1],
-            }
-            _cost, state1_c, state1_h, state2_c, state2_h, _ = sess.run(
-                [
-                    cost_val, lstm1_val.final_state.c, lstm1_val.final_state.h, lstm2_val.final_state.c,
-                    lstm2_val.final_state.h,
-                    tf.no_op()
-                ], feed_dict=feed_dict
-            )
-            state1 = (state1_c, state1_h)
-            state2 = (state2_c, state2_h)
-            costs += _cost
-            iters += num_steps
-        valid_perplexity = np.exp(costs / iters)
-        print("Epoch: %d/%d Valid Perplexity: %.3f" % (i + 1, max_max_epoch, valid_perplexity))
-
-    print("Evaluation")
-    # Testing
-    # go through the test set step by step; it will take a while.
-    start_time = time.time()
-    costs = 0.0
-    iters = 0
-    # reset all states at the beginning
-    state1 = tl.layers.initialize_rnn_state(lstm1_test.initial_state)
-    state2 = tl.layers.initialize_rnn_state(lstm2_test.initial_state)
-    for step, (x, y) in enumerate(tl.iterate.ptb_iterator(test_data, batch_size=1, num_steps=1)):
-        feed_dict = {
-            input_data_test: x,
-            targets_test: y,
-            lstm1_test.initial_state.c: state1[0],
-            lstm1_test.initial_state.h: state1[1],
-            lstm2_test.initial_state.c: state2[0],
-            lstm2_test.initial_state.h: state2[1],
-        }
-        _cost, state1_c, state1_h, state2_c, state2_h = sess.run(
-            [
-                cost_test,
-                lstm1_test.final_state.c,
-                lstm1_test.final_state.h,
-                lstm2_test.final_state.c,
-                lstm2_test.final_state.h,
-            ], feed_dict=feed_dict
-        )
-        state1 = (state1_c, state1_h)
-        state2 = (state2_c, state2_h)
-        costs += _cost
-        iters += 1
-    test_perplexity = np.exp(costs / iters)
-    print("Test Perplexity: %.3f took %.2fs" % (test_perplexity, time.time() - start_time))
-
-    print(
-        "More example: Text generation using Trump's speech data: https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_generate_text.py -- def main_lstm_generate_text():"
-    )
-
-
-if __name__ == "__main__":
-    tf.app.run()
-
-# log of SmallConfig
-# Start learning a language model by using PTB dataset
-# Epoch: 1 Learning rate: 1.000
-# 0.004 perplexity: 5512.735 speed: 4555 wps
-# 0.104 perplexity: 841.289 speed: 8823 wps
-# 0.204 perplexity: 626.273 speed: 9292 wps
-# 0.304 perplexity: 505.628 speed: 9472 wps
-# 0.404 perplexity: 435.580 speed: 9551 wps
-# 0.504 perplexity: 390.108 speed: 9555 wps
-# 0.604 perplexity: 351.379 speed: 9546 wps
-# 0.703 perplexity: 324.846 speed: 9579 wps
-# 0.803 perplexity: 303.824 speed: 9574 wps
-# 0.903 perplexity: 284.468 speed: 9551 wps
-# Epoch: 1 Train Perplexity: 269.981
-# Epoch: 1 Valid Perplexity: 178.561
-# Epoch: 2 Learning rate: 1.000
-# 0.004 perplexity: 211.632 speed: 7697 wps
-# 0.104 perplexity: 151.509 speed: 9488 wps
-# 0.204 perplexity: 158.947 speed: 9674 wps
-# 0.304 perplexity: 153.963 speed: 9806 wps
-# 0.404 perplexity: 150.938 speed: 9817 wps
-# 0.504 perplexity: 148.413 speed: 9824 wps
-# 0.604 perplexity: 143.763 speed: 9765 wps
-# 0.703 perplexity: 141.616 speed: 9731 wps
-# 0.803 perplexity: 139.618 speed: 9781 wps
-# 0.903 perplexity: 135.880 speed: 9735 wps
-# Epoch: 2 Train Perplexity: 133.771
-# Epoch: 2 Valid Perplexity: 142.595
-# Epoch: 3 Learning rate: 1.000
-# 0.004 perplexity: 146.902 speed: 8345 wps
-# 0.104 perplexity: 105.647 speed: 9572 wps
-# 0.204 perplexity: 114.261 speed: 9585 wps
-# 0.304 perplexity: 111.237 speed: 9586 wps
-# 0.404 perplexity: 110.181 speed: 9605 wps
-# 0.504 perplexity: 109.383 speed: 9601 wps
-# 0.604 perplexity: 106.722 speed: 9635 wps
-# 0.703 perplexity: 106.075 speed: 9597 wps
-# 0.803 perplexity: 105.481 speed: 9624 wps
-# 0.903 perplexity: 103.262 speed: 9618 wps
-# Epoch: 3 Train Perplexity: 102.272
-# Epoch: 3 Valid Perplexity: 131.884
-# Epoch: 4 Learning rate: 1.000
-# 0.004 perplexity: 118.127 speed: 7867 wps
-# 0.104 perplexity: 85.530 speed: 9330 wps
-# 0.204 perplexity: 93.559 speed: 9399 wps
-# 0.304 perplexity: 91.141 speed: 9386 wps
-# 0.404 perplexity: 90.668 speed: 9462 wps
-# 0.504 perplexity: 90.366 speed: 9516 wps
-# 0.604 perplexity: 88.479 speed: 9477 wps
-# 0.703 perplexity: 88.275 speed: 9533 wps
-# 0.803 perplexity: 88.091 speed: 9560 wps
-# 0.903 perplexity: 86.430 speed: 9516 wps
-# Epoch: 4 Train Perplexity: 85.839
-# Epoch: 4 Valid Perplexity: 128.408
-# Epoch: 5 Learning rate: 1.000
-# 0.004 perplexity: 100.077 speed: 7682 wps
-# 0.104 perplexity: 73.856 speed: 9197 wps
-# 0.204 perplexity: 81.242 speed: 9266 wps
-# 0.304 perplexity: 79.315 speed: 9375 wps
-# 0.404 perplexity: 79.009 speed: 9439 wps
-# 0.504 perplexity: 78.874 speed: 9377 wps
-# 0.604 perplexity: 77.430 speed: 9436 wps
-# 0.703 perplexity: 77.415 speed: 9417 wps
-# 0.803 perplexity: 77.424 speed: 9407 wps
-# 0.903 perplexity: 76.083 speed: 9407 wps
-# Epoch: 5 Train Perplexity: 75.719
-# Epoch: 5 Valid Perplexity: 127.057
-# Epoch: 6 Learning rate: 0.500
-# 0.004 perplexity: 87.561 speed: 7130 wps
-# 0.104 perplexity: 64.202 speed: 9753 wps
-# 0.204 perplexity: 69.518 speed: 9537 wps
-# 0.304 perplexity: 66.868 speed: 9647 wps
-# 0.404 perplexity: 65.766 speed: 9538 wps
-# 0.504 perplexity: 64.967 speed: 9537 wps
-# 0.604 perplexity: 63.090 speed: 9565 wps
-# 0.703 perplexity: 62.415 speed: 9544 wps
-# 0.803 perplexity: 61.751 speed: 9504 wps
-# 0.903 perplexity: 60.027 speed: 9482 wps
-# Epoch: 6 Train Perplexity: 59.127
-# Epoch: 6 Valid Perplexity: 120.339
-# Epoch: 7 Learning rate: 0.250
-# 0.004 perplexity: 72.069 speed: 7683 wps
-# 0.104 perplexity: 53.331 speed: 9526 wps
-# 0.204 perplexity: 57.897 speed: 9572 wps
-# 0.304 perplexity: 55.557 speed: 9491 wps
-# 0.404 perplexity: 54.597 speed: 9483 wps
-# 0.504 perplexity: 53.817 speed: 9471 wps
-# 0.604 perplexity: 52.147 speed: 9511 wps
-# 0.703 perplexity: 51.473 speed: 9497 wps
-# 0.803 perplexity: 50.788 speed: 9521 wps
-# 0.903 perplexity: 49.203 speed: 9515 wps
-# Epoch: 7 Train Perplexity: 48.303
-# Epoch: 7 Valid Perplexity: 120.782
-# Epoch: 8 Learning rate: 0.125
-# 0.004 perplexity: 63.503 speed: 8425 wps
-# 0.104 perplexity: 47.324 speed: 9433 wps
-# 0.204 perplexity: 51.525 speed: 9653 wps
-# 0.304 perplexity: 49.405 speed: 9520 wps
-# 0.404 perplexity: 48.532 speed: 9487 wps
-# 0.504 perplexity: 47.800 speed: 9610 wps
-# 0.604 perplexity: 46.282 speed: 9554 wps
-# 0.703 perplexity: 45.637 speed: 9536 wps
-# 0.803 perplexity: 44.972 speed: 9493 wps
-# 0.903 perplexity: 43.506 speed: 9496 wps
-# Epoch: 8 Train Perplexity: 42.653
-# Epoch: 8 Valid Perplexity: 122.119
-# Epoch: 9 Learning rate: 0.062
-# 0.004 perplexity: 59.375 speed: 7158 wps
-# 0.104 perplexity: 44.223 speed: 9275 wps
-# 0.204 perplexity: 48.269 speed: 9459 wps
-# 0.304 perplexity: 46.273 speed: 9564 wps
-# 0.404 perplexity: 45.450 speed: 9604 wps
-# 0.504 perplexity: 44.749 speed: 9604 wps
-# 0.604 perplexity: 43.308 speed: 9619 wps
-# 0.703 perplexity: 42.685 speed: 9647 wps
-# 0.803 perplexity: 42.022 speed: 9673 wps
-# 0.903 perplexity: 40.616 speed: 9678 wps
-# Epoch: 9 Train Perplexity: 39.792
-# Epoch: 9 Valid Perplexity: 123.170
-# Epoch: 10 Learning rate: 0.031
-# 0.004 perplexity: 57.333 speed: 7183 wps
-# 0.104 perplexity: 42.631 speed: 9592 wps
-# 0.204 perplexity: 46.580 speed: 9518 wps
-# 0.304 perplexity: 44.625 speed: 9569 wps
-# 0.404 perplexity: 43.832 speed: 9576 wps
-# 0.504 perplexity: 43.153 speed: 9571 wps
-# 0.604 perplexity: 41.761 speed: 9557 wps
-# 0.703 perplexity: 41.159 speed: 9524 wps
-# 0.803 perplexity: 40.494 speed: 9527 wps
-# 0.903 perplexity: 39.111 speed: 9558 wps
-# Epoch: 10 Train Perplexity: 38.298
-# Epoch: 10 Valid Perplexity: 123.658
-# Epoch: 11 Learning rate: 0.016
-# 0.004 perplexity: 56.238 speed: 7190 wps
-# 0.104 perplexity: 41.771 speed: 9171 wps
-# 0.204 perplexity: 45.656 speed: 9415 wps
-# 0.304 perplexity: 43.719 speed: 9472 wps
-# 0.404 perplexity: 42.941 speed: 9483 wps
-# 0.504 perplexity: 42.269 speed: 9494 wps
-# 0.604 perplexity: 40.903 speed: 9530 wps
-# 0.703 perplexity: 40.314 speed: 9545 wps
-# 0.803 perplexity: 39.654 speed: 9580 wps
-# 0.903 perplexity: 38.287 speed: 9597 wps
-# Epoch: 11 Train Perplexity: 37.477
-# Epoch: 11 Valid Perplexity: 123.523
-# Epoch: 12 Learning rate: 0.008
-# 0.004 perplexity: 55.552 speed: 7317 wps
-# 0.104 perplexity: 41.267 speed: 9234 wps
-# 0.204 perplexity: 45.119 speed: 9461 wps
-# 0.304 perplexity: 43.204 speed: 9519 wps
-# 0.404 perplexity: 42.441 speed: 9453 wps
-# 0.504 perplexity: 41.773 speed: 9536 wps
-# 0.604 perplexity: 40.423 speed: 9555 wps
-# 0.703 perplexity: 39.836 speed: 9576 wps
-# 0.803 perplexity: 39.181 speed: 9579 wps
-# 0.903 perplexity: 37.827 speed: 9554 wps
-# Epoch: 12 Train Perplexity: 37.020
-# Epoch: 12 Valid Perplexity: 123.192
-# Epoch: 13 Learning rate: 0.004
-# 0.004 perplexity: 55.124 speed: 8234 wps
-# 0.104 perplexity: 40.970 speed: 9391 wps
-# 0.204 perplexity: 44.804 speed: 9525 wps
-# 0.304 perplexity: 42.912 speed: 9512 wps
-# 0.404 perplexity: 42.162 speed: 9536 wps
-# 0.504 perplexity: 41.500 speed: 9630 wps
-# 0.604 perplexity: 40.159 speed: 9591 wps
-# 0.703 perplexity: 39.574 speed: 9575 wps
-# 0.803 perplexity: 38.921 speed: 9613 wps
-# 0.903 perplexity: 37.575 speed: 9629 wps
-# Epoch: 13 Train Perplexity: 36.771
-# Epoch: 13 Valid Perplexity: 122.917
-# Evaluation
-# Test Perplexity: 116.723 took 124.06s
-
-# MediumConfig
-# Epoch: 1 Learning rate: 1.000
-# 0.008 perplexity: 5173.547 speed: 6469 wps
-# 0.107 perplexity: 1219.527 speed: 6453 wps
-# 0.206 perplexity: 866.163 speed: 6441 wps
-# 0.306 perplexity: 695.163 speed: 6428 wps
-# 0.405 perplexity: 598.464 speed: 6420 wps
-# 0.505 perplexity: 531.875 speed: 6422 wps
-# 0.604 perplexity: 477.079 speed: 6425 wps
-# 0.704 perplexity: 438.297 speed: 6428 wps
-# 0.803 perplexity: 407.928 speed: 6425 wps
-# 0.903 perplexity: 381.264 speed: 6429 wps
-# Epoch: 1 Train Perplexity: 360.795
-# Epoch: 1 Valid Perplexity: 208.854
-# ...
-# Epoch: 39 Learning rate: 0.001
-# 0.008 perplexity: 56.618 speed: 6357 wps
-# 0.107 perplexity: 43.375 speed: 6341 wps
-# 0.206 perplexity: 47.873 speed: 6336 wps
-# 0.306 perplexity: 46.408 speed: 6337 wps
-# 0.405 perplexity: 46.327 speed: 6337 wps
-# 0.505 perplexity: 46.115 speed: 6335 wps
-# 0.604 perplexity: 45.323 speed: 6336 wps
-# 0.704 perplexity: 45.286 speed: 6337 wps
-# 0.803 perplexity: 45.174 speed: 6336 wps
-# 0.903 perplexity: 44.334 speed: 6336 wps
-# Epoch: 39 Train Perplexity: 44.021
-# Epoch: 39 Valid Perplexity: 87.516
-# Evaluation
-# Test Perplexity: 83.858 took 167.58s
diff --git a/examples/text_word_embedding/tutorial_word2vec_basic.py b/examples/text_word_embedding/tutorial_word2vec_basic.py
deleted file mode 100644
index d7bc63fbc..000000000
--- a/examples/text_word_embedding/tutorial_word2vec_basic.py
+++ /dev/null
@@ -1,373 +0,0 @@
-# Copyright 2019 TensorLayer. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Vector Representations of Words.
-
-This is the minimalistic reimplementation of
-tensorflow/examples/tutorials/word2vec/word2vec_basic.py
-This basic example contains the code needed to download some data,
-train on it a bit and visualize the result by using t-SNE.
-
-Once you get comfortable with reading and running the basic version,
-you can graduate to
-tensorflow/models/embedding/word2vec.py
-which is a more serious implementation that showcases some more advanced
-TensorFlow principles about how to efficiently use threads to move data
-into a text model, how to checkpoint during training, etc.
-
-If your model is no longer I/O bound but you still want more performance, you
-can take things further by writing your own TensorFlow Ops, as described in
-Adding a New Op. Again we've provided an example of this for the Skip-Gram case
-tensorflow/models/embedding/word2vec_optimized.py.
-
-Link
------
-https://www.tensorflow.org/versions/r0.9/tutorials/word2vec/index.html#vector-representations-of-words
-
-"""
-
-import argparse
-import os
-import time
-
-import numpy as np
-import tensorflow as tf
-from six.moves import xrange  # pylint: disable=redefined-builtin
-
-import tensorlayer as tl
-import wget
-
-parser = argparse.ArgumentParser()
-
-parser.add_argument(
-    "--model", default='one', type=str, required=False, help="The model name. It can be 'one', 'two', 'three', 'four'."
-)
-
-FLAGS = parser.parse_args()
-
-
-def main_word2vec_basic():
-
-    # Step 1: Download the data and read the text into a list of strings.
-    # Set hyperparameters.
-    words = tl.files.load_matt_mahoney_text8_dataset()
-    data_size = len(words)
-    print(data_size)  # 17005207
-    print(words[0:10])  # ['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse', 'first', 'used', 'against']
-    # exit()
-
-    resume = False  # load existing model, data and dictionaries
-    _UNK = "_UNK"
-
-    if FLAGS.model == "one":
-        # toy setting (tensorflow/examples/tutorials/word2vec/word2vec_basic.py)
-        vocabulary_size = 50000  # maximum number of words in the vocabulary
-        batch_size = 128
-        embedding_size = 128  # Dimension of the embedding vector (hidden layer).
-        skip_window = 1  # How many words to consider left and right.
-        num_skips = 2  # How many times to reuse an input to generate a label.
-        # (should be double 'skip_window' so as to
-        # use both left and right words)
-        num_sampled = 64  # Number of negative examples to sample.
-        # more negative samples, higher loss
-        learning_rate = 1.0
-        n_epoch = 20
-        model_file_name = "model_word2vec_50k_128"
-        # Eval 2084/15851 accuracy = 15.7%
-    elif FLAGS.model == "two":
-        # (tensorflow/models/embedding/word2vec.py)
-        vocabulary_size = 80000
-        batch_size = 20  # Note: a small batch_size needs more steps per epoch
-        embedding_size = 200
-        skip_window = 5
-        num_skips = 10
-        num_sampled = 100
-        learning_rate = 0.2
-        n_epoch = 15
-        model_file_name = "model_word2vec_80k_200"
-        # 7.9%
-    elif FLAGS.model == "three":
-        # (tensorflow/models/embedding/word2vec_optimized.py)
-        vocabulary_size = 80000
-        batch_size = 500
-        embedding_size = 200
-        skip_window = 5
-        num_skips = 10
-        num_sampled = 25
-        learning_rate = 0.025
-        n_epoch = 20
-        model_file_name = "model_word2vec_80k_200_opt"
-        # bad 0%
-    elif FLAGS.model == "four":
-        # see: Learning word embeddings efficiently with noise-contrastive estimation
-        vocabulary_size = 80000
-        batch_size = 100
-        embedding_size = 600
-        skip_window = 5
-        num_skips = 10
-        num_sampled = 25
-        learning_rate = 0.03
-        n_epoch = 200 * 10
-        model_file_name = "model_word2vec_80k_600"
-        # bad
-    else:
-        raise Exception("Invalid model: %s" % FLAGS.model)
-
-    num_steps = int((data_size / batch_size) * n_epoch)  # total number of iterations
-
-    print('%d Steps in an Epoch, total Epochs %d' % (int(data_size / batch_size), n_epoch))
-    print('   learning_rate: %f' % learning_rate)
-    print('   batch_size: %d' % batch_size)
-
-    # Step 2: Build the dictionary and replace rare words with the 'UNK' token.
-    print()
-    if resume:
-        print("Load existing data and dictionaries" + "!" * 10)
-        all_var = tl.files.load_npy_to_any(name=model_file_name + '.npy')
-        data = all_var['data']
-        count = all_var['count']
-        dictionary = all_var['dictionary']
-        reverse_dictionary = all_var['reverse_dictionary']
-    else:
-        data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True, _UNK)
-
-    print(
-        'Most 5 common words (+UNK)', count[:5]
-    )  # [['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)]
-    print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
-    # [5243, 3081, 12, 6, 195, 2, 3135, 46, 59, 156] [b'anarchism', b'originated', b'as', b'a', b'term', b'of', b'abuse', b'first', b'used', b'against']
-
-    del words  # Hint to reduce memory.
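tl.nlp.build_words_dataset performs the classic word2vec_basic vocabulary bookkeeping used in Step 2 above. A minimal, hypothetical re-creation of it (illustration only, not the library implementation):

import collections

def build_vocab(words, vocabulary_size, unk='_UNK'):
    # Keep the (vocabulary_size - 1) most frequent words; map the rest to id 0 (UNK).
    count = [[unk, 0]] + collections.Counter(words).most_common(vocabulary_size - 1)
    dictionary = {w: i for i, (w, _) in enumerate(count)}
    data = [dictionary.get(w, 0) for w in words]
    count[0][1] = data.count(0)  # number of tokens replaced by UNK
    reverse_dictionary = {i: w for w, i in dictionary.items()}
    return data, count, dictionary, reverse_dictionary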
-
-    # Step 3: Function to generate a training batch for the Skip-Gram model.
-    print()
-
-    batch, labels, data_index = tl.nlp.generate_skip_gram_batch(
-        data=data, batch_size=8, num_skips=4, skip_window=2, data_index=0
-    )
-    for i in range(8):
-        print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
-
-    batch, labels, data_index = tl.nlp.generate_skip_gram_batch(
-        data=data, batch_size=8, num_skips=2, skip_window=1, data_index=0
-    )
-    for i in range(8):
-        print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
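tl.nlp.generate_skip_gram_batch pairs each centre word with a sample of its context, as the two demo calls above show. A hypothetical, minimal re-creation of that behaviour (illustration only; the real helper also wraps around the end of the corpus):

import random
import numpy as np

def skip_gram_pairs(data, batch_size, num_skips, skip_window, data_index=0):
    # Each centre word contributes num_skips (input, context) pairs drawn
    # from a window of skip_window words on either side of it.
    batch = np.zeros(batch_size, dtype=np.int32)
    labels = np.zeros((batch_size, 1), dtype=np.int32)
    for i in range(batch_size // num_skips):
        centre = data_index + skip_window
        context = [c for c in range(data_index, data_index + 2 * skip_window + 1) if c != centre]
        for j, c in enumerate(random.sample(context, num_skips)):
            batch[i * num_skips + j] = data[centre]
            labels[i * num_skips + j, 0] = data[c]
        data_index += 1  # slide the window one word to the right
    return batch, labels, data_index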
-
-    # Step 4: Build a Skip-Gram model.
-    print()
-
-    # We pick a random validation set to sample nearest neighbors. Here we limit the
-    # validation samples to the words that have a low numeric ID, which by
-    # construction are also the most frequent.
-    valid_size = 16  # Random set of words to evaluate similarity on.
-    valid_window = 100  # Only pick dev samples in the head of the distribution.
-    valid_examples = np.random.choice(valid_window, valid_size, replace=False)
-    # a list of 'valid_size' integers smaller than 'valid_window'
-    # print(valid_examples)   # [90 85 20 33 35 62 37 63 88 38 82 58 83 59 48 64]
-    # n_epoch = int(num_steps / batch_size)
-
-    # train_inputs is a row vector; an input is the integer id of a single word.
-    # train_labels is a column vector; a label is the integer id of a single word.
-    # valid_dataset is a column vector; a valid example is the integer id of a single word.
-    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
-
-    # Look up embeddings for inputs.
-    inputs = tl.layers.Input([batch_size], dtype=tf.int32)
-    labels = tl.layers.Input([batch_size, 1], dtype=tf.int32)
-
-    emb_net = tl.layers.Word2vecEmbedding(
-        vocabulary_size=vocabulary_size,
-        embedding_size=embedding_size,
-        num_sampled=num_sampled,
-        activate_nce_loss=True,  # the NCE loss is activated
-        nce_loss_args={},
-        E_init=tl.initializers.random_uniform(minval=-1.0, maxval=1.0),
-        nce_W_init=tl.initializers.truncated_normal(stddev=float(1.0 / np.sqrt(embedding_size))),
-        nce_b_init=tl.initializers.constant(value=0.0),
-        name='word2vec_layer',
-    )
-    emb, nce = emb_net([inputs, labels])
-
-    model = tl.models.Model(inputs=[inputs, labels], outputs=[emb, nce], name="word2vec_model")
-
-    # Compute the average NCE loss for the batch.
-    # tf.nce_loss automatically draws a new sample of the negative labels
-    # each time we evaluate the loss.
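The Word2vecEmbedding layer's activate_nce_loss option wraps the standard tf.nn.nce_loss op described in the comment above. A self-contained sketch of that op with ad-hoc shapes (the variable names and sizes here are made up for illustration, not taken from the layer):

import numpy as np
import tensorflow as tf

vocab, dim, batch, neg = 50000, 128, 4, 64
nce_w = tf.Variable(tf.random.truncated_normal([vocab, dim], stddev=1.0 / np.sqrt(dim)))
nce_b = tf.Variable(tf.zeros([vocab]))
embedded = tf.random.normal([batch, dim])  # stand-in for looked-up input embeddings
labels = tf.constant([[1], [7], [42], [99]], dtype=tf.int64)
# nce_loss draws `neg` fresh negative samples on every call.
loss = tf.reduce_mean(
    tf.nn.nce_loss(weights=nce_w, biases=nce_b, labels=labels,
                   inputs=embedded, num_sampled=neg, num_classes=vocab))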
-
-    # Construct the optimizer. Note: AdamOptimizer is very slow in this case
-    optimizer = tf.optimizers.Adagrad(learning_rate, initial_accumulator_value=0.1)
-
-    # normalized embedding
-    normalized_embeddings = emb_net.normalized_embeddings
-
-    # Step 5: Start training.
-    model.train()
-
-    if resume:
-        print("Load existing model" + "!" * 10)
-        model.load_weights(filepath=model_file_name + '.hdf5')
-
-    # save vocabulary to txt
-    tl.nlp.save_vocab(count, name='vocab_text8.txt')
-
-    average_loss = 0
-    step = 0
-    print_freq = 2000
-    while step < num_steps:
-        start_time = time.time()
-        batch_inputs, batch_labels, data_index = tl.nlp.generate_skip_gram_batch(
-            data=data, batch_size=batch_size, num_skips=num_skips, skip_window=skip_window, data_index=data_index
-        )
-
-        # We perform one update step per iteration of this loop.
-        with tf.GradientTape() as tape:
-            outputs, nce_cost = model([batch_inputs, batch_labels])
-
-        grad = tape.gradient(nce_cost, model.trainable_weights)
-        optimizer.apply_gradients(zip(grad, model.trainable_weights))
-
-        average_loss += nce_cost
-
-        if step % print_freq == 0:
-            if step > 0:
-                average_loss /= print_freq
-            print("Average loss at step %d/%d. loss: %f took: %fs per step" % \
-                  (step, num_steps, average_loss, time.time() - start_time))
-            average_loss = 0
-
-        # Prints out nearby words given a list of words.
-        # Note that this is expensive (~20% slowdown if computed every 500 steps)
-        if step % (print_freq * 5) == 0:
-
-            # Compute the cosine similarity between minibatch examples and all embeddings.
-            # For simple visualization of the validation set.
-            valid_embed = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
-            sim = tf.matmul(valid_embed, normalized_embeddings, transpose_b=True)
-            sim = sim.numpy()
-            # multiply each valid word vector with all word vectors;
-            # with transpose_b=True, normalized_embeddings is transposed before multiplication.
-
-            for i in xrange(valid_size):
-                valid_word = reverse_dictionary[valid_examples[i]]
-                top_k = 8  # number of nearest neighbors to print
-                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
-                log_str = "Nearest to %s:" % valid_word
-                for k in xrange(top_k):
-                    close_word = reverse_dictionary[nearest[k]]
-                    log_str = "%s %s," % (log_str, close_word)
-                print(log_str)
-
-        if (step % (print_freq * 20) == 0) and (step != 0):
-            print("Save model, data and dictionaries" + "!" * 10)
-            model.save_weights(filepath=model_file_name + ".hdf5")
-            tl.files.save_any_to_npy(
-                save_dict={
-                    'data': data,
-                    'count': count,
-                    'dictionary': dictionary,
-                    'reverse_dictionary': reverse_dictionary
-                }, name=model_file_name + '.npy'
-            )
-
-        # if step == num_steps - 1:
-        #     keeptrain = input("Training %d finished enter 1 to keep training: " % num_steps)
-        #     if keeptrain == '1':
-        #         step = 0
-        #         learning_rate = float(input("Input new learning rate: "))
-        #         train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
-        step += 1
-
-    # Step 6: Visualize the normalized embedding matrix by t-SNE.
-    print()
-
-    final_embeddings = normalized_embeddings  # .eval()
-    tl.visualize.tsne_embedding(final_embeddings, reverse_dictionary, plot_only=500, \
-                                second=5, saveable=False, name='word2vec_basic')
-
-    # Step 7: Evaluate by analogy questions. see tensorflow/models/embedding/word2vec_optimized.py
-    print()
-    model.eval()
-
-    # from tensorflow/models/embedding/word2vec.py
-    if not os.path.exists("questions-words.txt"):
-        print("Downloading file 'questions-words.txt'")
-        wget.download('http://download.tensorflow.org/data/questions-words.txt')
-
-    analogy_questions = tl.nlp.read_analogies_file(eval_file='questions-words.txt', word2id=dictionary)
-    # For each question (row in dist), find the top 'n_answer' words.
-    n_answer = 4
-
-    def predict(analogy):
-        """Predict the top 4 answers for analogy questions."""
-        # The eval feeds three vectors of word ids for a, b, c, each of
-        # which is of size N, where N is the number of analogies we want to
-        # evaluate in one batch.
-        analogy_a = analogy[:, 0]  # [N]
-        analogy_b = analogy[:, 1]  # [N]
-        analogy_c = analogy[:, 2]  # [N]
-        # Each row of a_emb, b_emb, c_emb is a word's embedding vector.
-        # They all have the shape [N, emb_dim].
-        a_emb = tf.gather(normalized_embeddings, analogy_a)  # a's embs
-        b_emb = tf.gather(normalized_embeddings, analogy_b)  # b's embs
-        c_emb = tf.gather(normalized_embeddings, analogy_c)  # c's embs
-        # We expect that d's embedding vector on the unit hyper-sphere is
-        # near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
-        # Bangkok Thailand Tokyo Japan -> Thailand - Bangkok = Japan - Tokyo
-        # Japan = Tokyo + (Thailand - Bangkok)
-        # d = c + (b - a)
-        target = c_emb + (b_emb - a_emb)
-        # Compute the cosine distance between each pair of target and vocab.
-        # dist has shape [N, vocab_size].
-        dist = tf.matmul(target, normalized_embeddings, transpose_b=True)
-        _, pred_idx = tf.nn.top_k(dist, n_answer)
-        return pred_idx
-
-    # Evaluate analogy questions and report accuracy,
-    # i.e. how many questions we get right at precision@1.
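Before the evaluation loop, it may help to see the d = c + (b - a) ranking trick in isolation. This toy NumPy check (random vectors, purely illustrative numbers) mirrors what predict() does with tf.matmul and tf.nn.top_k:

import numpy as np

rng = np.random.default_rng(0)
emb = rng.normal(size=(10, 4))
emb /= np.linalg.norm(emb, axis=1, keepdims=True)  # unit rows, like normalized_embeddings
target = emb[2] + (emb[1] - emb[0])                # d = c + (b - a)
sims = emb @ target                                # cosine ranking (ordering is scale-invariant)
print(np.argsort(-sims)[:4])                       # ids of the top-4 candidate answers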
-    correct = 0
-    total = analogy_questions.shape[0]
-    start = 0
-    while start < total:
-        limit = start + 2500
-        sub = analogy_questions[start:limit, :]  # questions
-        idx = predict(sub)  # 4 answers for each question
-        # print('question:', tl.nlp.word_ids_to_words(sub[0], reverse_dictionary))
-        # print('answers:', tl.nlp.word_ids_to_words(idx[0], reverse_dictionary))
-        start = limit
-        for question in xrange(sub.shape[0]):
-            for j in xrange(n_answer):
-                # if one of the top 4 answers is correct, we win!
-                if idx[question, j] == sub[question, 3]:
-                    # Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
-                    print(
-                        j + 1, tl.nlp.word_ids_to_words([idx[question, j]], reverse_dictionary), ':',
-                        tl.nlp.word_ids_to_words(sub[question, :], reverse_dictionary)
-                    )
-                    correct += 1
-                    break
-                elif idx[question, j] in sub[question, :3]:
-                    # We need to skip words already in the question.
-                    continue
-                else:
-                    # The correct label is not at precision@1.
-                    break
-    print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total, correct * 100.0 / total))
-
-
-if __name__ == '__main__':
-    main_word2vec_basic()
diff --git a/examples/text_word_embedding/word2vec_basic.pdf b/examples/text_word_embedding/word2vec_basic.pdf
deleted file mode 100644
index 6dc5b9221ea14a0fd6fec68f664482dd1e481ae9..0000000000000000000000000000000000000000
GIT binary patch
[binary payload of the deleted 113953-byte PDF omitted]
zlbxgRAL|*Q@3faB7{Co?;*3!cQtJC5=0u^^@R=$=4$Y)-9Y06wVS48F9zm8!hoecp zw!;iMn;|ygO%E12M+sra&FlSdbhqsbryUHJU4d3YepO0#6L8f*Z7_#)kPmRf@8*0;MtZ9 zwV9UOC-mpPftU@3&-Vu{GPNbCw!V#8YEN+y++Kh}zo%nPxto-*bndeXim4J~z9{EN z10vK&EOAYIqRJ@wpwHaAohQgOSJTnrHNe%!5!So>sN)^K+pE+X?lRPhlh9E&4*54J zxz>G6>a9*6jK=~`pSo=dbn1EahOY{7M0T}wFkIwOtbwyjyi23G;OE3m=u`%z-xF~# zOJ_Hdx$$D%^s}nKJEbX|!bQ6*APa!a;EMnuK|wUqlMAEn7HiihfX`FIaMlw(HF(Ty zsV}c%GVR@>hMW|k)}?uMC>(voLD+5Xh06b~+DA_ouHHVpeCJZ59P0^voI8-|`_Ki@ z{&EzDd=wghw)C{{tmDg!uje9$kd4=ZO)Y#QvF^1h>9R5NvgS#gCBAhX%|r9deP=f7 zD7(BEcDAJL^ZDD`&5QCz7%+)~J1SIyj}3?Oyiv%j%lY`r+R_UHe`EWE!v$1Y5F7a1 zniFxS&emIMbH`R4i%Fg7WE!JddCG@U2TY(i{Jo+v${3?Kk%!(b=0%~&?&B`nMl@&H z%O>1-TY7hX4Rq-6wazyYF(E}_OR?Ex)!)kS)nbC$(g7?vInpKSh!|HolcpzArnVC; z$}BqBIThBSS0OHId%VYb*xz;@Sr3v{m;lG3NwNniZg zpUjOMxmTwUDS?cTl@~llDrV0W`bFIql8mUXiZi|T`I(#0wtx`$gs;b}5$&Ws+Y5t% z5{q-qb6eARwASgGr2))Djmh(b(SvshQu{yH*OWlrtj$Q@_>pf*Sux8F_`3j0n{lec z*+0sY1?0kk%)Z$i&CcwD{BrsTehZuPHu{ZF#=UMKUis6Qr`jiP7gR9KOxEgVw+Qw8 zNwf)KlfNm`#55vNO@a(h73#Mh%mNlm+&1ieaB3{9Gu3=}t*zK- z-^(~CI=!zuQB4Y{%g~ zWN(z03&=&CDhO!HjX3pk4+5N5vgY=u$NN}ox(&8rJEWMU`LBs~Tudd%no}K*m1|P5 z-^o^GWa7lk&8NgqSse(pugdLTzcueFC?QbtYO)b*S))K`LbvF(reBzgwv8Bzf76r( za$DSO7WSm@8skqg^Vi9Q4iyJfYzf`eLNk&Z$@px#vh;ZhjI|qNnYnm3CgZd1eEq6; zt{eHINqe&~#jo~6&I|dIAL+C#%2boCYV*+)|5-(z*!!S?AI*=H@tlfz=$`uRFUw^h zVKVj;COLge56|rQa~VVzZ5(h9$22itT3;LHRJ^IW@hqu*%LWJn|hg#CJA7^fzo!(v3 zEdU9uneY1wD0qqqK0P2c+zp1kXU;2l-!0}sp*i!JJwOR<*GLdQd*Ob1NPe{Wu1jgwWZvXW00R{OROVY@~XKE@`7?$@8HOvqGpsv|Cc#Oh!?P8|6|s zYJSgF-Xp^)?`xC9J>KhD2vTQ>MRZj1;eE9OmBf4fi9fJ4+BbpGrZKow$!qhQ!uEwO7iGy^UpK%6B0e$; zzjZ`<$*2A?yP)D-Z!ARRVzTbywm$R%XLxWSCSE``?3C9J5>2E1=f-4ME12xk?}V2H z?BdcZ6n(nj254?aI9SSbu6CG>hJJnU@vyZ^yZiJRlapaV`&j);{!_=rRpLC45 zP~GvSEq=g<&awOA1crEjuG@(-x)zTIaAUpSEIzJWU?|B&KinLHvX2SfCCY;s1LB)Evb z{Ogodfc7^Fec|TR%y{GDZ8~vwv=#lyZYQXEb0}_N$n@diM4}&(-7raX3`O-Ai;;si z0N?0`TE(+%DP*S3eZ)&F%nZ;LeU~urnnD?~PBzz<&8-Eb0hgCApeUHcnapfSuE6-*un)eGZFl*u8M$LDehSl1z8bHyMJj8x%u;Vg#`01ljrb=Ib_joMb`*{}7%Jx!n=Cou zf>f3n>NV?C)e~(RJ3}cziB5_`UgFXJ$`#z<5I|<$6?Ra`8}e!MIe3BHwmX~Ie9Q(B zH)Lt9*~2p4@dSVJa7nE#)GTj*L0L`|4{1dmt;Z^%zEI(GLcWwdz>SEGH$brPs7xsU zImmetvxMP2v%dr$5^is}hEiP|GA`zjp#Xl)!u?jy?R_Vu+?1v1Vm!DHaEHn=Dcq8{ zSjJt(W8@PWlv&=t+${wHorPZsI(Qw|9IxxqWc;-6w>cL#$U7w9EanSn(T(Oy_ot~5 zx?E)vUyePQm!VFyng&KjXPliWoY0?~BU`)(k8gC#nt5lzr)1lZFLA>F|31NVPhY8x zjG)R$R07XJ=e?Y~xwCO<<|^s_Hev9j*T?Uoux(mk=)+)v`3B|)MOAMYt8nArZThLL zDRLRFtDPc)X)$t3-7Z`-J>nSsq={ACbsjvy$7hi2Rb|cl+-~CR)q4}IDt@tzoJ02g z@|*N|=wK?<=!7Dt8bUCsvu^6C)Klbnh*PHE!b5yJ)3W)rvob**2$hk^+OYCsP4jbxi9$lZRJ^O7PJLF1_e}9Io##0pdHW%)VlV2L)OL&?$Asc3A>TYkYU%O0Yv;$>U1Q~Q4}9oJyDOg$jUqoUprqWPe_GveX;h(e0SFzwY=UpC@EPxe zDF^S!PoQdD0oy~1k3`fLKN(HGG?^gHygQ7r7Kog~1m&`854nXbRmUJL=O1Nb+~Sj- zfI`x&R0_X4CX7P&v&An}Tu)Sz*>_!{D;HP51UW^Z|rLTmXXXiYswkL{&Oa+|KOuEwx)hH|HaHFl6z+j9!AtSC;O_b zcS-5U2?K#QAyyl>8ibw_!iUH>sy?ER3SRV@?2LQ?x;r3t&FlUrmwbya9KL=j^+iM@2@kUXDe2bE8v8$+RZYy@YVJ zL&o=bC(jk|k;FtFzJv|~Wv?u>lczTMXeDWWZ%dPWAuvx1mqk*KDLOzl1;(Z=3- zJxCkk(Cjq(T!w*rLuwlj*XRPQ0TywbHj!Qhp@jBiW9LLZp_4v&elOrJ*CK&BB*3Xvq1tAMo$nI zS(IbY>-TPVv@wp(DTY5kiT{z6D^A9i%2XP4?C67gqmo44Om`xwhC@oyO%mEbaEhP1 zbT?gf`wU2XcoZ!P*&phZT;$Zf5$8@+ZpOYDHDVMM@n6b3a9G{;aOzzlZsZXR+Ftd~ zuui}s#K(8pw%TpVd}?pc$*05&CR8%am`i@f8b;Jtr>SF;gcxsR6pNC8JT&K@H|Y*t z$TO4P5R+?uolT@rAw}wNi%ndPxNYFk3D?-6TV&h4UGF+ym(KN$9awVO9q@Qh`MzvV z#C{T{Ov}NP>C``3)$ZbD|2)hmuhq74v2Mz$7YMmp3i^<$K!2K6;;V-O0nwZzO2VjPf>NOM0GYrGot#O|^^v>;3ZsMIF~jEFefuvIt)zrahw0Ny$=?DITc_??4j z6AC`LGjG}+-JZH?d=8)+3i04zIq~lhteWmtbB=`()CZg&URdepB>u4Rx;58feJA>M^1v(IwCgCcrt>!mLoui_f+^ssond_ 
z6pL@Vj<(PU>8>4GkQsZG@!wLjj#DH3Bm+S`Wo`rCeq$vyZ4c0Z&yaLhML`fqr#%^t zzOX^o(~#|-<5MWwR-fnz%!q<`(0&vp1))lwdtZ{dyB~!qD-(=f$`9Q0L>;%8isT<9 zpvf!E{+%<$4V{Km8Bfv$4DFnx-E9@!KF=Hqi2H6Ly6es4xcxa~B76|Qh$#6&Pj~MC zNtSGP+=LuiPsFm$+c0`{(7_G!lt+JVveCY@q_JaS3)X%hQIcl4x0pTcTD89n96b5f z4MjW%Zs0U9SWh#tV^4NuMz-Re-L*5%-fP|GF)IiTw2e$v;+s;;wFRYH;H{P+ml!Kj z+B`>#i8X{0(3=>hFk|hyIKyG9A~Qw6ME-v$p1s;b_BnsZbJN@Nr>S;GJY@J#Nk2Wy z*w)(BLT`-2CLhDwgF}7Dl0XZ&;cSMK`6+`(4Za!#eV7}H+)F$BR=PE<=64~1%4pLZ z>2mD3WF?veoS{(n9>!=-ZD$q{tDC=uYP0N2PI93iU38)!{;*b{oW!gniL}v97OIBuuRg@$e9Ow=+ri+dZ{Qan9HcojE0+^6COl zPebGy_5`nIdq-fA%5Qfss+$c=(6Td)N44+s7riygmv$oh|8RfI@mIa8xu*T| zyWbmAJ+U&L501wBr1heZmxn!>d)2e`bW5>C-f>lv7;2ZQZ|3ub=f4Llv|9Vc9Pz{{ z@u(N9Qj{IvbqK!os*oL#7VR+BfL`$0-L~^xf02Y17^pMGBQ>6v)o-x|am?ap_D}93 zBaQKs>k5&gM?}R`n)7boI-BvIs7EA7$qwI{((+}@BedS}S7nCg%-~3TRGftAwEXd( zGhjL`({HURm`)q%mqEHF@W2+^?aB>}aTc)t+#~nA#U0#V@tX+2VN;RoYj{Ok4AZ6j zvFb6=eof0CE*_nFKT{yunPi|lt6+lXX!>y$uCj!0XCaOI?h~Cgn}z#Q=TNb;T}6ik zRwTWN(KC*rHy!gq(jwQHi@1cf1jQ%UPEnkIlPylR54_+r_8q4CJl|LpCIKUN-A*n7 z#Kn2JQ}R)D_W7m_Vnx;by+Y1-dU{;6Zj#>`RX0&g8u!x;xA)sy0O~K*9Rtb~pF&&u z#YRn9ha;GoWp!>V;_Td2L0C7MaRpkWnEA@1^gW#~nv9tuy5=)~5+0gN!=^f7JQVA#vz$ox#%&aW_}Mb~X-Df=Rm2292{Q zh3F>(Q*XZ;8Sh4qyy3(CN&enMKOYH;hpJ@k8+dHULxOkqqXylzZc0JA+lUGFjVSl; zyVYMc@~0oZ*9`CopEo-?$%UA8>a1{j9ujkOKLqfuQ!y67q(N<#U^s_siSI&jU%&yG zJ)|av3E zP1(^KhK-F8lhZPOl)m1k++;Tb>$j#6N#!l^hdw`^-k;_v;lcS~-_WXacyo(-5%pPi znAuzIDNJuZ=(Z-{v9jrZb(-UDwTWq$!JDMy&{uYFqV+vO@eOE?+?nPOCj0-I-Nmv1;KqI=_?8vMLWqEeG;Zb?7OFK4 zFvEC-2i|Vx>nxo3RT)gh;!FM<0pKgpB$=}O!v^?=_g(W0+}*dA)#RkwG`u749ubn? zlGxA?vR8C{rslOgWE0vBRDQNa3;LqV-a*K_P1G*`Kq236vqLLLJmZ@k&YFbg9v!sL zH3Oi0xTRcAKHv$Ct(7j>9u(VjU7x%qn6KsSeQO0!Pb@rO5tfHkNW8BAkYM9yb=O)% zE9{cm4PkH73^j3T53g!aQ7QM)+S4SIIGSMg(V9<){~}!pH=eAAu{{s&NJJB7!@tNF z%=VL(sjZ{Tk2=0YHrF3ysXQJpGxIvr)z^GEjfm3A&^k_%-78UWmttYWJV2QiNd5zd zV)|Dpt@eyoaS7Gl@n$xk0Hjl2V@vfz;p7xFYx8gr=qyI=hckD-dj$hp{7Va6T?y}$tsHaFRai|feCLy zL3L>gA#U%V48C~_yq<3N--#Y2a9Co$y+jKpQPW8b<>2S`h`h>`?p5+sa!~P~wyNQ5 z!3i1D0ZaK56(X>@TMW0lCVM>iDz>6HU+iK}Mj2;>oUo~P+t^NWvP*)1`8;bTVkE}Z zH~XrjudN7VYXP0Vku`~?HQZLJ1bq1R#xqthU_|W+_lqSTMBuo5+5mN>jG2P6C^xrg*VN>gntc2GapMq<`6!;g0 z&I>oO>uQSw3uzUFSCI3HrmCLO$3y~!d!3H#We%n?ew!}@vh{PZ9S3{`A?l(nJr+b( z2aKvVG5kVGlf;WSE!P+?Iq$@kE@*?el6w|gzf2^t-vIXTNv;bCiS+_mNu2GBLiB$I zg_e_wNjhSWtoA{4JxY}@bUnANV|?rKXSqB5BUlmsD#ryz2AH2IvU*|bh2eq1hb;k2 zkEJ72HWT}$?aA<+7(Qy?60SpchjL6uo3AKj$H1+KdPjoyEpwJ_vd?jAq4>VAhid797Ek?w{kD>bNo-=9lldW!1?L0MwLe0@@R+Mfj7gnL$c4kH zb>&8V2#Q&+FiKqa{C+%1pN{klWe64)h`Sl1Cm?|cnkrH-Bg+%jVB%bg2R67(F~gh5 z8%LanZGB*;g9RKQG2bU^f5KF(F~cr4zAx9M;pb03&8F0jI96g;FV1q12cItppI>&?b6D+@@=iS*R+99@|8@5+lo~D+>yh`3M2Oe^$-q2a$=C_?Q|LR4 z*;pmDqxfDxe1_-Kk#3wNA82j3rH{ATdF3DscNAOskt#ay`^nPUu(Hk2Ul_Jzqb4eqzRxBMjTtUvyUO@|IebvAD}uKD#kvv zQH9I5rEKMwLAVC|qrKp-`?+mermBIn9Sw+dsOMXBwcgfxSd#?kkd-O0NSt@oZD`*) zL#`jw^-9t6=p6G0&ztMD&Dk)u8MZxPf!|%JtSTigyJ5cXE$?Z1eAcCIt6(6v9{K|o zL_;S-tKuCV=Qt&3_M;@)wsK=GWVrw9{wjBP=f73(;$yYJ4=@II(tzMj`LH@(gGWyd zvYV67iNpTdk+J>)OEAIEVgvICCKyJr3fKN)M`{je`cmumLYJG%ojirS?G4wE`+09K zE?$;Pc$Z|OSGtow1UK-jM+`G_WvRb(nUBBIb0XJ5)3o z3YwY5_U(J^&Ux!?dpd=E*VCpYuy|T{x1jAf1Het1L!0rt{HhxjYViP-Hc?JED5`{*+d_FUM!aPi#t79}x zg&L?s8iK2+6trHziOE2Ziv_E(=7^g(ZOSt{^Ye%7=A+=jBH6fYu3gz3CL~Ft=o##r zH)=_EnK4U|8sSJ689tCwOiXzc=lq^U+W?a~InDJac~^}Jxiwz-&K@rD8NYN%J=zv2Y{(-C8j zC1rAlH29)6&9~ZX#b&x9z zwLQ1BWX!j|j+!u6Dg`@=s}LZ>EMMqi{V2HNRFN@-|nVKjl{ZJUtEz_0nph?}L&1$g=l44o^Q1hIZ$3Jp=Z$R}fW z!+#L*(|Q=XmGKWX`h8Uagewxy$nUj*p6Gy~b=-brMQC%raQ+7Yx)Sp>%eH$jpk7;# zp5lY}Qs!|B)9P3i&4qYBQ zpst=hFym+2;SH<0`u!4+Bf`e)A=v%BQ2_8!nh%tYrN1p$LJ=ch)6nU8sa}FNGc5Z0 
z7G&TZQNf9g_ZOpGZeT_^a#tdb-^NtG_^5ez!IM?bS>etW0QhYP#yFA>%3TnK6{)X|?p%@30az-wDX6MK8g3g{4ST1-=c**%;kvKad zh@^i1DnA}l^)%cdf zQg!W?hn0ZF=^#E!a2T(2LeivM8j-oX*BV#venM ziHdd3mv$)1GD7Az-?D-5C=R#v@r&a?>=Yh zF`TWNGk6%>ww?8kW}MnA7H^mYWqG(%>~_Kg%#}sF^i5?BfEmlW;-{kis@hxY744LoIIsjuJzvpmh?%$L-+aQM)6XqdJcaOlHG}gMnip%uT57!o?D)qD|?VbV(zc}s)SkyLud1>Bs)G}VTm*4KqDsm*zs&dIwWJOes2B#(3{iL`27`EXg1BFsWmihc1WCH-DaZ!GZ;a(V zMU-mKHs=I>%%s$&8+JB9moGHbu(e*o-icgv?VBY8j94FusIWe8aY?Ut+XB%Au8^x1 z2`^N=gE5y#E48Wr1wZNS<5;;R?&hYKAE~B0keLs&k?{0-MkVGt-xaKCeKZghBMpdz z!q_t`joOB6RSQd_etx#_^?tf4jhle7n~-4NncV}=>Et9 zOVQr4GNQ?3tj#UymFFYhZ*H4qJWP^;sxcr(ukK4O zg6&ABZ2G8!9@ANuw&jy}#Rmv;03QQP88vKKAt?08Nq-a#9$#AL9^F_dU0*Yh%VoyN=XJ)!7DQNhl3FSC!Th9z zn&eL88XvLleH$o(`QEhIN zy!DfXp|`+&62EKbqwCLwAvwihiCi?aMc^vVP2F(1W4FJ@Qi5>V*3}9fTwFtS5k@d! zbOu&-&%wj`MFoPXyF|_)CEzpIVirdUXrAD9dLITBPy4t+*5V~)onV%X)Og`~QK(pA zAME3B!<{p!#9%kI$ho&EbEk8~y0T@t+f^BB1@fjF8V%;W+AN0B8gL`s)btm+ zXkjTGx>G%>D_)}1WDyZF=$<$Qf-~ffabhRsu-O)TQYuRoda5-j>NR1xbHocJ!Nq)Sb`+u9Gq9QPaiiydT*SNBI6*#=Cq zP>?!$NclWf_6uBgu&RodW97!V1D^y&Q&0U8kuX8wpaS25?a8o*%y#h`dIqH3`guB& zj?<{U)4h+J-=jB8>8fGMQm-)Ub|9Wg~K26KY+IH9E_2!co*KKJ8(lRRXn;kD1chEFqJnoLr z=j7Q0JPjojgfF|kYw!SPLYG7SNYPLlb4Af#q>ZqK0T{n~!;=+zKDItHl1tM&A8RB@ z(12$Y_v(OJwiO9(uKR}vWv}Ad*OK{RXpqK*u3GK!N4n>lCi?4yPrRmh%f{2l$n8va z1Kk~lbvM8%#hK;z-)5&^zHUFjSFyeI1AsGDf>lbg+~H?FUxvQki4lg0obyyN5Bnr0 z$VVLo{a;6y?o#g8Pk_y#M5k74zc~*UJCcRO^nP}dhAJc5gjGRvC~uF9Q(9ar58={?EB98SEM6S1VBHcC8_|tmqn3M&scL| z=t5zz-4P2{#z$4V?~WC9C0b1390itb;Y3{ZmfkqW0oFX zSqf$aTHs zqe-W`)T##ptair1tYuk{8PhwD` zgsbjy%)@Y|Y~C^Nd91uQ0HC#~+}T(VntQKFX{J`3&iQEvXU&AU9z2%^eXygg9&6K4 zJl__jJqzPFCYB9Mvy#pTSBblI&i_VWViRawL(fD&*nUnXV@`@fZ&AnY%94x*Bcd_> z|C?M1ff3Pvk}LF@I^8<;B~O4-R%EdUv(#o*zk=kmz*=pZ)cX6@4cpx}zJmrK{ zReFViZO%n~+4GGwpx_wpfPo(J!)=uDfkI)8;P4U(SK@Bqm3V@?8*`E!t$@k2PvKFs zKBCWPCldX$Ox%yGLSqu7P}*M+>tob zJQ!!&6O2w^&{5kQr=wLQi}Y1lD*Dg6J*D7Z1t3awY*;d#C#de@DSlSk-E`5d?@BLXFE)X*!?bEIIt$=$C(^Z}hqTZqd<$qy z2{D=|Ls_SQ;{3odlMqAM4bA<#bk8$!q$1bUCLby=h{Ndx-+W=L2>Lw#6_PI-cn_M3 z$@2&?vzkrj3)D$I&l?y>f?7EBmX4Y`-Ku6v(-gbS>8+Ymi9Vcu=xg?B)^~6Yc%zH( zIJu1ZSsKPiss1Jr{``|fShF({@%ep#&+Xh{IGVjODZBDCK%wyTc>DAP+ z7)~oX#j;AscVd{kKC6;JRATrMakm^No(KQhbxq~{@EvjS`i!d)Lg~yxQPQal!nmjf zXVxe3jUOjGHfGXvUK|nx{9VrO?%Ez0$Q47}dnd0MDy4!r1YO=i_T}I#iCPFkN0z{M*+&n z0K!>R9In*BOn0mjr3JCmz=OT2XwRscK8Mk^X?I~xiMn-m=3?n!kLEr%+e+21EHh!> z!T|bh+TVL@6l5UU7_u*sn@^ibZ&p~Pm#FoV*gH6^*z5+7^1C2ETHOqNv*M5`Ei(DgJS?5WP1Rs&Y|iUYxI{G}Zr4 zt{c{B_LL~Bmm(&=NF^24C%?*`_*FJ3NG`%@&Ux~8JCBw{Z9RI)K^>y`*RRJWOOu;f zvbvG<*NG@d;ll2|tR~_XRE$taX_b07-H%b1V%OYhe(r}vp~YIV^cjpxmv7rb6jEJU z9UgpDrKC!1UM@k2I(0iMCt{@Nt@>{w)apMTPG2z>lzSz4V+O0v5u#d>=ng;RqgbF6 zqS^NReubmzLg$-Q3JXYwivNtT$0Z!P?fJ(f6+!CASth$2>U1Sp9Nym(czzA;V_uWV zA!TQ#mk|e% zs=9-2e_mlLlZ=3JmpR}t1GOGaii>kMd-ab*sfzXc^rBY<(Xj-n*3oD&*=EMVDPw+>G!rS4r9OaqG(s0`J3BoRfuJ|@95G3{krTckU; zf?M`|mKW8{?Z`ZZd{UXgWJun?*%2FO+lIxsiZ&-+fbqE1WrywdhNivqGD;D@(U`W( z;VaTBxxZQT8%eYXU-$O(9Rp(P+bEWWznxmXK>}Z56YJ@)?jyo4exr!G(o%$!Y|a*v zEY|T3B`ZM{@0>mISjq6jubF?YimEe{|GIAWu;Tscdo>hJxOXv<7l!3GapU3hYhHnV zRfRF9s5>JOYDIT5F;n{uRtc3qJ-4+meAX-;;HxVWdbCux6_h#mDP5b8OfB6Vow4JHn zsI5j%ulo`0J|;R|ATe*3GTlGEa?Hsh$|RvM{!R@s-wniJ9^#Z7L*SD5!&`0-;ydD4 zwHdQ-bf2e?maRh)LCmt+(4zZXoluUca`b@h;VzaB5+fSY^9iGWG;r1{A5bTm?G2;| z7~yPP=qYvJ*DmHrkuF*Lrl5j`_PI=Dyp#MJ&5vcTiJ*aQ7f0*)oaq=P#iV=}gN#XhgQheu{klPyAhqWQn)84UM*9R9Ngzc2N+ zQ`oWvZ2}b7`#_1{!bitimZSLg1x3NE(*V&=pV}-Y-a8Y+T#-ND4an!gJ^h|Zm%sP_ zPFQ2cTLU2z*`zk_y+YNKg{^nT&lI=l&s3HMb6Isy1SnScV?Khw2sio45huqL%t`=q zF~aeco>N>@ot0Z3S^wM(&h$g@x2IQeuO}L0HpX--Y4flzvUsy;)>kv8z$fM3;kRUC 
zH8tHE28LGW1wVP&4;Bq%qK!|;rx)ITGa;IJyb(*;6Ek|#O$ZkS0)CdG4&)>dNga62 zSL>%bHri%>FD~4Nr#Si7U@Tu3U}Sj95&jJTo!%&~78s2Jjth_~7ti^b)Qgn}oL8+n zH`HgVFFzz=8731k-c-nq_ltAf9PqM8%NTlSO&5 znb^*;|AR^`yDDSwK}$S>d?%6`;?Pxle%5zb=LD$M7z~sOB;DP4f)>R-O*7loKOtB#ukLa^L}ho zj0scC&XUMr;I?!s6SMA$__T&)>%GiuRKpzqZpng`J-N4;PH-qbRX-Y#)GY_vwaeZ3 zanC<1LM!U6J-%P;sYjLL%?05w#mu{NgU$^CBlE zh>@2e^1CE6vkVYk=GfyMwbVSPnkAdja^8#P=)O?uyp=ZbllHtYuUj8jqZsI9J9y`H zw9uc?6+G6soB^6X`F@GT;R97I6+-j~4$*}_{A+p+L1 z-^@!ICVxt=qjuI<>>XmIUg2rh0WG#Wnk+7ejT{9g5a$xX(fWDDun+z|De_o+m|vab z_;yae3|G1kocddN(AaRnECd*PmbV?Ler=B*%JF3ADg_`Wce!P%|HTyRTDj%lRZ|>^ z48bRXUYJ9IL1gZV{t0X?Tm+m_gU((1Pw^0vd78ph4Br2CoBpQ-9COaHhfSE{HWb!x zB^Fn9p0Mv`_l)F}vnl(+ePYqBd;Wvrr@hcC8yCY^^o7KZ*Ad##3bD-qfyoqNEDr)-+c;b$UK}Vg>LD_$sdEbNTAE zWWU*7gAcW)MFFh$ks_Wc$udR(OxnM*aYdj0(d>k4)gmq_H4RI;)l;5iw%)}*qT(wy z-rtt>s}0OiM~zC%apc**d-4rXc;Js+RbO<0a3IQtFODGqsq*IAa_s-1>#c(73YRt7 z;1Jy1-Q7L7I|PRS!5xCTLvVL@C%C)2!@}L&^{!;^I_K2Ab)P8SR?V8yfA{!C4^F)2 zIOgQWGwCw9(@&ek)R8|J3ASw2_3$BYE+`%J6O>L=6Rjpf>zm(YD!@lS!58MVP_ja< zjS+i%$&hAfA3h@oMf?%BB=Cee*ha<5pX2NlSx) zG0H|$ZXvO7a4~83^{A!GQ&(f!jM|6T3JUTYq%Hh4#PzO}#YW3m?>)Uev6l%LSwl6! zO91t{b2MAhy}26{W=&5oinDH=Y40ogu?M0zLaQ&0Gv8P#%0K)!GVR)D3(Oz?P2ff5 z6+}Zg{}(1c&3=?T*W+ZZYLXujVktG3esdcwh<%!jKUhtC8<;uAKpyTJIM~oqu)F3Y{zK z!^t{NFuZxxtSFhd^I8@T#g1(5-ovl(BfhwlP7}7tgJc#MJMc z{VCTeu(Bto(e%>0aKmiAckvG=$yR$78M4^=D~O$euhAek2C?}%?pHgl081BiMD%DX z;(~jO>+S-Z>OCbelIB==(b`R+0E3s=*ts5L=TpR4OVUy%g5Z2iDDo|U@mJ1Dsqzbo^0~`#o?pDWh$T&&Yny>`0nGxO zGpFRNdy-usHZ!bVr-7JE-XG8O*go9^>s1q_{(_vVUJHWEYO!{AQ4%1YxDIQvh7*G{ zdzH{88a`8Z<~w3=k|gA?!M!^cw!qOgLmyYQnrHLL?=c4c(U^Roxb~l5Y7Zqaq4HKO z_K!|W48{@ri_*CMLupPY2|3xxt`$>$yj)M;k;ywkouP^}2moUUErfbEpvk05Vi@Q{ z#ZJ{J2r8J1o0^}(mRV1NC6DV=Zf)6|OGB1?D%Q45)38_Fm&4_Wg3MEhB|o?QM1)@? zz15dBj9KlVPaE0_VW0h8kBDV?6vM?A#WKm%#)M;;&BoHHhjSIuJ;YG+P{>_Qa@@@` zgaRm1CL#<87jw`mMx;W3>|p4QgdfebnYLMpb;2!kVU)tehk2Z5*AR-cBxLfsYESOm z=v}$Nujr+H?l~wZ-Hj3C7*)EesF$l%z68pi27FcI-Hi|d@ufZQEC)W5XLyCjG%KBb zF!!+(PAB&^Wx)<;E|ozK0`MNorf(B|knhJbZHxLwx#_I0{_2)Mvk2}+dwZ+&hHmsn zg8g)xuyUE+55?>UPc)m40OPVc=c^7xyOy(4xb+oTmvKV}H>wtv z@tf&feqQH0gA{2rDqaZg*yF9`xR~KdvJxg?D4@~uMs762AH-9xpi@=5S#X_Hwf(TO}VUZS8 z!2_{seFAM7kFl7$1QKwHA+$g#$O=`ogow=4Zjic2o8_OVm8TDT^u4XiH?Jts-00p~ zs6V-qe-|Ab5yjlRvz+cbp7@PNjZi9$g$)aS2DeK>hJT>h-}Zz^5YZEQHba8wu9hf7 za`q4Xow!ZSGsuCZ@C28Do$|mL2t`#O#Si?caloG%W{2@Q@)T7nC>vor#ebVcP1Ea) ztU3Aslglw880CSpsLba`@1^wae#jQxFkU^XH|ZNb9FWcdd1*yOJm4UeGJ;bR4c+^FvDsy!WTY@ZMvQ<& zqnzvtu3^f|?E89+%jrcbGxTW%$bM2h>!WuBl4qe5M)}u^EJxu9o5GoqDXg~Mrnn2^ z31yz6La@Eb#m;Y-d<52bSHS391X7d6wxP<4PC+bG&jK#^+Fl@}^JpQP2%-%}EI!M{|>1tKt&GQ?cqtAc9ZQ3k`DfAizRsF@SIGH@Cnuf9oIg8GBF zN$Me7ElyM7xSm`j!^^o{0gCIIXkeS$Yvl`@OLOA(?3s4Vd$hkhW4SIIV{b{)9Yic| zsl#bdej>R3h}xPiLPFSM#^42da1$R=7Ac(CU;s%#h6IgIe{pHK$4mp2WDz)H>b5Q0 zFy+KwW>-!ocoN1DOJ69O1sr1{%wXo`FC3OI;0^R{{F{B0{8U|%z1);9-7pAtKvz*5a{-_=>l+-cF>-$ zO7-aQD?B>F46`+|%y^Yt?HLN-rn6PSFj`k{T z#)3xNHE|CA2HE3kyxWTmPuG64C&{Y(U3Q+FCD*WQ%(W^UijY(f?d?S?W!=|+8w#WeT1e;BX080DPiaNeye{nu*!1C zYZ8-$20U#iV{=H^Sqo1DU2u{4E0;7jb;b{%A68kr>(j@|BJ0@js&5IG53jno>6%!( zHZT&k5VL#>ztEeP_9zuVXthP5fq+r=gI7137_48 zhJb(JYPJCCSmb~@7KV@pt?S>4^nXW=U7NH@B5P@{#LXHb55d)YOB!V^gefP30}tx` zy(-5s2{RWZZe-T_GJ7pm9ZpEki&u&GO_3$ps@qJdMiM~)i_4CVj`FVjKt;V<;M60G zq|+_p303EofnP5X;+V@==1jgk#DTo~s#Qks6CP;yF^0Z$ANNV)IHM6hlotsdQ zJ6dG$H{buc*J27yca*)(8AG0OwpHgccp(JT9AKW*1P{8x+Txu?|5X~imBx1Zfy|U9`vN{}}m)QthqX6}^ zlq)2FpbasI>iZt)0$B_^J<)!UViK)5YBrzO4lLFD9EhzW?1Iz!X70`nfGxyThH|7(4C&p>kMWEAAzB4Bs){)jF{h|!J# zhuD3!qhv_Kl=;pO29}_o7jd-Dj^<)yW5>8uAJ4_u#r~zfI-%Gq7QlL>h^2(`V0pWI 
z&FSf;Q#(p-(&hLYMe>X#@s=@pH&<43?3dD`2&6{G6bPyk`gKky(Y6)ZXV48>A-&a) zL#ha>O!KvD-3DQzRYH(QnQpE3gBwe{U;pi~d=ESV?J z<|m?LhqV70^AC0^Dhp>9jhow${Vv@*IlleVd7DFTM~qBW!;mFB6%_dY58u68A=y0B z3^cRc8%OHkWWo2sm!`T=3)s<1b}~jOk7e;m;6<#%!6*Da){X77R{{(tOxD+IxjVKv z>i*(on_kf-B@>zP+xvBB2;I5FrQEE_a%iP-{|2?)12ceg@dUoO2Ui`DCCt;ipL(Wh zpbVsnvl(VCvv9!tOTNFO{jATc})2DGh8_kTB#3jd3PiHps&qB!ATQFYaM`u zR}$OU;TNvi2HNb)ubmRCh;OGfW6vy*wo{H9De;dJ%EzwX-E5-#;6vKLN84ko^+x+- z8cr3_5z4{@T~$kG#jZywu%uTabe$_$U zCR1p=gD727hy}ytcHQ0>!_V(dIS=xhyO5pgnpx3^P|tEvKgwnR;&)^LWT4k~hHDjP zVnm`fwi{vLKKa{#M;6DeH7=gq4|S~Uygb+-AeeA5QxgaMfHeA}gO!K*uGswgT^9L9 zf2q0Lu1c9XeQMW3jw3w53S%b5Kv}|2mFYI_Sa&ON`hm7#-~{K= zpAKNQ3Ju#y@}#0{+k7&6h@plij@#>DR3sMC1DceF#2m$cOkZu;)E5NZ8Jgj5*!)O@ z^gHn60S^SO{((TeiQ9(>VVAu@ueO)ChW_};A-)9#u8-;oSwTEh%CcUMXJ*z}0bean z-}TCRs7YqxcO%>>b-Y!^dmF=42Z=HSUaiw}!mLxhynY1=bxW{h+7S4}%?L^?`vv>q zVR`uH#>vl9T#_Xme?e`V>6ZY`JAgQ5yqmsvW3B??=qLKiWy#+5e(@_Vyvy-H&gbOz zStRSkJT)9+p2WWfaZSl{4KxQZ%`c3d_5F2s?dgNTWT#Vkex~d00LGXRx38szFnIn7 z%=T1rZ?7Hyw_1lLtcDaCpc7wItL*%MAyef;YBi*dkwds0TlP^8g4VX)qZngAmV2x% z_)-R;C&c>=RnBRK%%) zd0>@JAEP18pF8WQS@C7Bh&n6I;S{r{s&m_8%Slxo{ zJ+$)Da1oIPWi6>k;CK_9PQu;yH7@MZf-MGtFO=6R*<;p^9fmsbwSwsz%G488mnuHu z6VO@1!9VyL(PQ{a3z0ArF<8s`9zvw(u+WGb3=TH|7(EB zp03ANOx36NFi3U#jyF9$Grky#Vy8GKR;Z&G+`)@h=$7~^EzUpfCu1}pBq3VML=+>* z5R3xthJw4i$jp!ZHe^L7`K0_Y=qEGG4tjs%3hrlb2!iD-#FR0l#qorhIiLyh+fTJz zwUDx15McJbG2+g@Eg;A_5W)P0n^b3Z-3b zwx>YCoZT*;c7OeQ1Kix*6Q}F_@d_vMp-9Tx85?vlb?^S;&8UfCU8~3y=zWmWW3zKk zkpnA0zyf6e4~sc$-algzaNGEMS%bEL^SGZzFWL5j6cNH=b;SEr zfs4-LLnc#XJjW=oGL{%_2nzr*#_nafFc}N z0t2m&aV~!l6yhHwN4lw2a9vQQt_A~C&W;D_5*i*a3KkvzLt_G6tOhfGrlHk=0I3Le z&VS~gK@hFIDvyJ@dfFmpl&vy!I=uW!c8VUv>n`NVb<5>lyw@u-Qe?~*^_AZNHUy$t z0A0R(giFyc7S*k7(-{lfjgc<(fXmxN(yjK=Vx_HYV`+F}cQ^=~y>BVf{G2*cDixvQ zMH*N{rgaJu-4ghSt+1%eGc{Cspxio#DB8xP#FBoAmm2NwBeTi&}C z_1~rpMI2Y-Ui7Ud2;Hjo)w@1^n0y4kKoTwl65^>r9_P85pu!cN4OH={r$P{#PApk6 zDOxb->!B3836V9Rd@{GYMkf|A<;Wpdhl!jHa;Z6JLaWdfoe6ts_CvGsCjwhSoZ#alD|!eW*rYHW0P z{_KIZ%ktuszdqQgJH4G!%sSKC37)prx&e!yyCJHLU9u0{sCZ7R2Mdk%Ib%(l8Dc9? 
zUsM>rcE^|}U9!^9%pM$bou@AWorGY8IHPhpdAbX@7XEFNmQbEM=^mJ9iS1nOIoc*$ zwNF@=!{+<4fd;H{olc&&4u=_xO=f?EtAzy(FRu@Zf}&9s?Y} z5cnfb!ntvtjQg;xr zYC6H;>Fx0v41moV%;a;EdEi|kmU{tc|!aKrzFMQ{5^mPN7t zhDHCQQDL37q@rXP8Dk(}YR9dBx>vKAN7Okt`4b76eChi~f1r~jClLW|PBERq$occY zSK&6@-J!jDgvGc#%6c18_=}0+w=Bs>mwB>F2WpV&HE-n_5eQBOK@XZ($h|p+RTN2p zcLU{(fA7fzquZvK6pl<D~-J*|#vn-R-!xp-mDlFZG!4Fyy|Nd>+H zPgw4UqbzgWSi68c>ftXTNHc7NcwR7U(IKmzdb2aisacrg5;QZM!%tGIjUJPfdT7Pe#5nylA2RPR`gW5iME_lePlAV zs01IG9|5GeqeA)Er}VQqF#n{p+L5EnlCB7hO`O-{;CMHb&Vrhb>ZRkHe%*~YnhD;@ zNQ~23ks~h~U+&0v*=t|Zz4UbFtAaC!0G6FbVB7(Bn zOCU9`TengG{BO7T|6{flY}d=8^cpJGH4k|YsN$#TIj0rUdt@*d`b>&#UxJMVo48bm zQtIzm&oty~0R-HZ4qNfkU6RI+FiyF2n0aJB@7n@P{H2A52!pLFiUnaXvhU`CH86hkPyDc-tcPJk{s~*jx!n> zs!;!dMn+HSJ3>>_1)7%}Nxs(uyW4Vs_1_gCOBO#KQ?KtY85fq$#PV=Bg03hcOP{|0 z(zHtw=%|)S3cGl;X6rW1sQ9HP;`(NUKkYs(a{Yv@XXU-Y{I2kVmuUtxO_fQrH)p!ayj^Gk+zDX|2^R$NnlJ zUo!C3A406bG*6=p4mELAg~~baR{00eZTlM(ZVmj6Nv>s)*=Mh4Hh57OV=e@(XSI0X zrpki>rYGn?LcXgizkJVu5`s^`CETom!{6Xymc}&ImJUaIEKzyyEYtqYSLbhF^#9(y zbQlTO_A5u=Q=+ge^CQwa zs+d2NtpWz%m2PzGl)bg`Jn2z5+Zn#TiE_oGwB0OfJ%J?pu(*5(yBEnS-cX4U_ca>n zEy##KU7V?4oTKO&7C`Q{&RHq4J8tP~q(+i{j`(CXX)YXYw3a)yR#ew>Lvo;hM)xMg zdKT{VZM6Ptsl*bN(1^9t=q-ziz<4y2|5h{Y8PdFc4^Pd!`XkE7#)lM++omx*Dz}l_ z>pQPnk6AVSW!d|(U}64ln$=@d$l9Z(0TJkOjr*f9e;=iBw$|!467^7@uYy*%yKt`o zVBVU00NZdrg{D{lI+cm*h?u$WTgFtsn$D)gCu_xT+t-+~l6C>-{C5|D>QI~^+vYoD zM%Fv4lU5m+&RXwLq&DiC+nrT_`-7{H4&R<8|ps9LNy`q!$#(IxoT*T^o z3Ip47TL7IEJQlnN0^}g%+qD-K`?k$qMvOcvWyU%-Wr`3u7nzh@#>%tcJ-`NeZU#u* zBHHCt(Fqq7i1bh!6N6FvpCiel@BYd~k9`+)4&sl&0)MfD7NVya zha6&#f94Fkf98x+PKqp2%cx(EvkHy$G8m;&KVpT(ICx5pzI=^w{g5L5S+j)6k$t zCeLiLz}}!d(!8gIhDvSR!EqiV*z2ndluEo@1HT=EWPB5Qp)6Xb%sz$J2RpBqB98y^ z)(cQnJ7>+wwuvG(nNRM6EwD(Ep^v&D^W zuQn+H2YKYQ!QIK|bRGeHtq*sF_PkhQV$FENuUnNIpIpg`avz^l>hw|?Ugb(ytP>tkYTi$1M3W|LUJ#7OL71oTgE(F6MKB?CgMv=*bjpwBLppB}86oMID2 zjL^8bth~$(f?W_P{{d$e;L8X@zD<**S#GaP?VwO8h($VQPfe&l=ch`~0)9Hwtihj@ zGNCcpfs>B^>OETo$0IMH{-CJ*3_Q}~_eSBKyZBIAFW$EM3Kt3(ml2u7&SaIt=0ahg zYRTRobA|Hyg{aXs4mH^p2q8_kB`3&%L6qZVPwoW@g6*foHor7tHWm&71#JJ z-*tQq!9is<#b4y(A?_M`IYsHY5XeCzjNFeGVq`N2aj*%MRQu*F7IC*Dmxa2HdC+ zcm{I>@Q;3gY({Blwbi6Gm35v(Arnvfb1e1Q0}5h8G&^=BCJ8vtD&ZzZ{(g}W8eqAs z9cU6~{EdGkd==9Li#FfP=3BG#yC;K*_PhjkC)UUF_R;UIa{yrNHw@K93V>?!1#Rz8 z<1(w{1KhCg033?aSgewY4$NWab+ynB@sg4g(jA{wl@W=#QIYe!ifDA_z@Z(&Lm<2q z+~SeRy@Z()DHqq5{V*tdD zd8|OfLMRrCg6{Waw%>QDUkHoGmJlnGLpZP3(;@|$%3ovIu>)BhBOJF5(!Ffbx{E5m z)CZfVe=UQ zN9Ac~k--X;Y)1%l#hf#$lWg(1p~6^eFV_T`@91pTIg(qd%a=pFa|Gl4Auy#KURA5bB&d#6(F zEIDWGf0L*O1Cs|03`HZKG@WdyuB>Z8BT$o8j5(CKEGLfVGj(3mQ$olQf-m?&YJbrP zU!f~}$!|Tj(oT-I%y(UP+4oT}DO&dPrme8guRTPWl^+y3TPjIwK65%R+usH=gTWL*0RX?A&kcW1gEAk&j zDRwQeo9KA>^N zUfCHafZw*a1?2ph)oJh6c0MQzLr>xZbhu8z9t~IlM17vu!w$xMm%<4gcVel~p=V%l z9EhK!GO&{HB5(~w`^~Wh!W4pE=73_`Z#A=r9aIvRNTSOu9&V8am6P7%&V4<+n;+Xt zG99ftKmCfsR7&I1;BJ*k>XksPTI@PhXkeT+s?Z|qe%}}}vvqBP zz&L#UkqR_^V<=zsP20>Fk9CDF(Wjn=N2Q2Mnx>M2ywHsd3D<&bD;Fa&^1+*b-1#WN znTE@CXi>I;Y|h?T;5+kp)Z9MRUK;r?)<+M>g!fyH8{#P7x;3s`sZYbv&p}kFYsA{g~1#16x%G zrWz>TGB1xZz@dHXCvL3@kNeTf&e6!R!I^eVdVCC$1$SQ6WNo|C1$?o25|}{4*(jlr zIHwtQ_Q=?y|D4?r9L6tVpW2mp5x4G^Ypc1k(SnWf-sR&$)Z&4?Yr*3|FjV@L4v+Os(}G(>{h z+6YU4Uf6@siN%+9%9ML?iC>I8FiXNxdge$v!3^tqgy)ojO&Ni{m4y^QS1G^LZ>tDI z$GlVRK!cQn>ut8#8TeQ5pL)J0HyA?)fhv8dYX6vtyH>lqjIi%Q)s#t!#wvu|^dF$k&`@*eAe43et$_Esz%mEWrcr#DbyI z}TM%0w8kOlBapQPQ^?^>`^A2F9`@>AxOwm4nOcA|klhOF90gmmeN94*mHkK-3UqPX7I86Tqs0 z;5{WK$Hwpby<2akHsB!MduP{p*WZ}jcvqNHqJCl@;?-BctEXxU)ZOCQVX$z1bFDc; z*D!I*aeMeRFI{d|MfDHzHKofB%^NzKKqchjDI{r6mVfNDslh`)p`2IWsJlalH7gV2 zlG%mGwdUKnh03O#iUlvN#Zxn+SYC_GvHB<>j7w=ui+6C9Ovq?O3oxBZR6? 
zKq}{h+jX5^lb{_|n|Gc)jUdJ!MjkNM&5CqMTCQkWVj4tdMJk=Go|iB;w=I1ItaWT} zx!fs4)3(P^BW8r`^0Bn`XHZ&p;0}@(gksD4C=&QPcz!n+A1*hzL50NtRvzh|N7gdp zS<|nc8BF@Nb|jiBuh-`xMjI1y;|<^WmR~oVQwZ_qz4NQ?=icB5Uy6c~C@2e8lma0$ z&SCJJ#3CpF{C%16X6opnPBI!gUt??69m4fYM@bcv z;gBI!Z-|?X73gG&2BMi5b{c-?iBggN#cuS!VACAAyQy?`6j-0yMpz^ZHR^BYB&l9+ zCmdK`;Eo#yc;rT(cwTk%q!`(KSZ+q@io^Nv6;N94K8)eW!yv*AI@9}PkWXJdq;tC3 zgTJ9%^80LA^?HovJAZ<^Ju2G;U#j7xS95Y*vZA7oBT++yHiiyi%Z-D5x(^;H$p915 zG>4840toCEv;DXG`CPjv{pb7pyKfL7`=4SoW|Feq*?eW@Bdx7@0R>rODHa(lU;^?f zmt!(I4^b(@oW^CA6J4GA}Dxbh!zlSYHS2N?bFu%%F92^ zd>i8>$1FkFfqThX;BbWgu=LH%$zT#?sE)6fS+rbtFaXHUo)!LVA*f>}+ zEy}no2~al6282&pwL!skQ1W#vj{=wfTxY@xH<8qOJ$BIQWQ&)T!r5Y?^o_XDbi`9P z+dtPTxH-P;xCT!I1Q+M*?R3X+G1E$6Q1*s~jLz>eD39 ztufX=``GEPCyOCZC3sAeV?>Imuv7htoa|QFCt+z~Wjs7bOo-zOD-E<@f)W|Xef(4p~MiwGmU*SxjP^vHe3hcOsi2pTjPg{xH&=?d|-(?%D6q}~cz}HuG zEI&Ug*rnkd?A=e|d2=2k(Rfa2fig-sn{5>?}_W$ zb6Wi#_1HzBKEvm)L;v(8Rnw1a*Yvc%3r{}dVOgT%!^|$U$9<^f=MG3yTI-6|wbEsf zjQks=dM-~RG2^Acm2gN6DcAU;r*bW-KPsE?x2yODBaK;)jL-mkoupWgA9KtfY^0rx zv>+}!JB}d-Vj>)UYUC`19ZMFVq$~zqs7Em%A(fK=yZTxF+C-)^uzgdo?w`wzDhMvx zQufNc9iLDQOip5F4N(S{xL@g=Ryn}+L;J?HU$$$C&Re z`2gq7o0`{|I?$~Rcr4Y~XxR^gLUj=Zbe<;HOCk`v2PqZ!; znm2Dx$WZ&&+`qR_vk@Lb!mi(Ud?s~~BkAmz)ROK#@fxbd7{7|(t4kC`g4D>3esJV2 ze>j4{opb@wb3Ab&_sAbN;%QwJ!xj=^HAQP^9|aKNaa8C56qt0H9ZhRd z*Gkq7nrcta2FwLNmVcWoI6+r2X!OO)@)1E&L1WRy^!BZ-$g{Qe)48;bC%RyAc?Zwa z@aRyXSki|qgTsX(>C93DlMn2OVb&@4WUCzV$Vtj#gb7>BpbM4cUpJ z^_GVROoC`PpTeX&#fc`vYWlB+fQZw2%dWFCqT_w9 zY|~}wzUw4G%lk>4T^j0L28*_uZ~=z#7PLHAs4Zgk(5U&x-mW~=by}`D_99=~tF_qx zD=@*LMMycz=CD8dEa8CmCBsm#Y0CCDV@@1g54qz|&EEI+X%OUOlPfYl?7x!I(?XpA zI5^C&ITf{kDn1{F9;AKVc^vW4#yMh9XI@85R4Y}%7Wzv;*Xq#{R)73(r6zDKjy2?w z=O!1_oD@>Iw$w{Pdsy*hB+sKudTqDn<2tG6TSw|+(pr_N73ZW_)m&;&gBds;nXsl7 zB<5L43YaD_27=xEB31FKyK~bF?Cw7OOan|~aqOGzR1eJc`=96QV~0CdxK*cay()^1 z$k!SN^H(Evt+_^d0@=v(PkKi0hCr8;FD%P&oGY&*A#1yoKPZEI1QNA|XVDqpA*c_2 zf!a1J_q;*%vfcWvdb^^b`)Assq@8wFZC1_>)iZruZoR*WzBUz)&g3XH=!PA5<>Ig1 zT3MCVD{l&WqgmTu=i>JFD?jDyoG~cK1YX=xlkbTCF51!jMil0b>y{=L@XqEAo0N-K zP`S5`I7*;JF#jGpm%zU)gLgmMC)aP)6xlrnLBoAWuYn8&Tb%iUh_ycox z)Z)lEWRYt!`pBJn7{XKd<&XsDN9tU~Nxl0CRC!t__tU)jkp_Ld&UZ=QF?Ir-2a#EO zMXTwm`Exa-?4mmJaUA%$3h=)7bj;*Ag(@6f@$9+ixNolh!N%ZkQtP!9<^neT!fu|6 z$b8$!W`E^_lpoDzsRvf|xuvtYUB3N6k4iZE}2E>vH!r+I#0FDe(HixGI@f@Ps$^ zg6Z_T`*oL3%9*?|8m{R!j}FXD@xOrwjyS7pdD?V=^{xKKM}3$i+>B+<#c|nHh|!~KEvb6lkmRGhFyUw557lOP*z(=a&ZZn^AZ=SZ)h9ECQjqL zj|Bp@r6GPcV!QX35D>bbg8E+%B2F(#FCkVdx8a{r%tXlIjrhpkulKD(38wmd z1q?^0KR?NyKST?tD`ahikHKUn`^Hr3ImH^@sNtXkn48Mi6l_dp{VJ5rI}0t4@YdAG zApw-}ZKL7}(+9;lu6-DMUip8xr8I?De*qe7P!aV$df%b*qp^YmHyTU=P?Mg{_H4R* z{KwGe-i6)c^$yv6GgB2WEC^n=S z_QVJ<`TBg*-hnDe(|CHmxyxQq9zbsysyH136jQFCj>VZ@i-hdk^ZedMHNg6J+BeaB ztJ*@pJ@6iNZ6_-N2%4V;Zl6IQBCMWujO&FDDA35$Nl}%E${(T#(Yd1vXzpn*qBqx; zq*S@sG6tm%{-kD#nT-mawW>W8LTjpyrq5{6Lhs|Ccu1U}br=4F1$wNPNG{;mtVYcV zf34*5$KV2X#Z}Qt(Yl>01YRM!azZO_|KeNK@j5(>#Y(q$F>iT@Ks+#TJTi#ueaP_L z`=-KXI0Zc01e{y|``@w;UAfc?p)`6LD?@T&h=(bI$#jPA_u#e+*+0@hFOK{thZYC0 zz1F4Tzu$vibru?QThGYLU^iRe83=p{s zEwuM>w6*U{Z0+{Vj4U-|I|ZD2sQgq~v1Ig`Zar`hZ6Xr7SiesE+3D6~WvKk713u|% zzh1A6INDNj{;pS{?XO)o%VCepP@}R`Z!=j5fLFvCC7i50cii4t1R6hW)_Xv1;+kQa zE@YuwM@o;1wYUW$G2kCHEtY-gQg5%0z#l^&{CCXcbsnv*GUlVzTZ+lj`y1zsQWiN@K4agjyS`zFJqk^#ms;+g-n-tb#RuJIBo= z@ST%hl?W*(DjTQCPef+gM+_xt4kymkei*Y(hyOF(^x{*pvLXBB8Zm%EDzAv3k2i-S@4ZXIQVmyV8kcAxX*lyYw$;zzz6j;xQ3XNtoBEa>j|4b(vV z&f^lgmgarNMas5P2p2knTbPe)p;DyG7@w912&duJr;G1lYaE4C%^f;k-&D{>X z*iDg_`Y5iXQOc$a)!Be&f(k^rPNJLuZ%>Gr$-EQK@z(WCV~r>tCC|mNviPEgBk+6j zWc_*naJ6ro`B=DN^yEMeP~BK>R;;t8eLAb(HF{=W4^vQ*6YtL+$M2&Y*b&7-wAU2B 
z43mfYk1fBvPYUngqPAwAtu>0{xqaT|<@vKmIQiYFYQ1=74q^<_bBp|O`_!!3bJf&f zEB^tOgP_k+x8td=%6`-$*__SLz(a%=@adiIdi9er$3iAG+hAFl^-O;XSkogo0N~e8 z*hZnl)I*VoHgOR?e7^lfwdTOV1}&Se-CKFJ2PJ1%Ao3Q}aPtT!HxeBp^mx6J_3h09 z{ewNh-+(<9fiMit-jdKcmSB{e-+vuKUdjdx9}KE=ImNgmoHCzqAz*pNVwMtbB>B-G zt!LQFD=!C0PNj6Vy6KbDUnbWT99@^o;6@H@a`n6`&Q>)Psj6L^7VLp>*>1z#)}v&_ zzPI(qu^!kYsoR5X^khxS$P>I)1-l2~Z*oY4WXQk@17@_I>G7#PUZzeR{#Ryb+@`n_ z$sbajCfn#C4v?>35r`YXf<>v`jOE6qg9k#G@OEn9vP3Dt-p!Rg-|s`dD@?KHv%=(h zZCG$Mj@C?NWj}vK;JFM*uM{g{r6<8wg>4_sagL;K2b~-Z?bYeiz#)L8M1BDAQ}zCZ zN$*Q7qNGo03zEXQwqadE3pFK^*ht}kI%FzuF7=`S9ZfDDK*CA(uhWL< zpu;U-4OohZUo(5Y`FXjrv6d54U%c?=#mb ze6VqTL?AL1%T4B)iby)gSAV$P_kLkRf2>|~o@qG5{Ag_YL9B`#)$ULk<*;<`$&YFo zU26$LrKSDK8x}ZL#Z=?Gf=8a$o<XaxN|r20KezEYHZ#dHFhzn?RvaGWfaC3GL!val5qkr6HlQxgo`BS2_K9}5101P0pYPR*K_{~hak{~K8f0@2AB3mZqd@O^@rPC z9%8u^%}M$po!cB4W0t@AS$KQ-6pc|BA?Z|;H%B0oDIE?6nt)4E-eRwoKLd^Vjy9m9 zE%f`*v*jCU>V1mE#xQk%-u{w*%jxH5w~$>pju^z;C?oFnmF59Y-U~C4s(<>|O@RiU zod0zOIY}OHPw8wLshDkblTFJWcpDnf*}(iYKGoV7j`wH%z0)ojzf>Cw^E{Qw` zV~Eh_mcvXcBm)+X1Mk7i*8_VEgEFq+8`fGew0uh^9S;Q9Gp0$?*tkn@sXbtlG@iny zAk$4=DB1YfnyfV9u6M|Q_*)j)Me+*t(cD91#@u6<1@`RGz{leRybtR0hel?j9{_@m zSV)*17sOlH1B@D(IcY@K<{VMP7wq}hDOD}lA$K%2`6 zr67}#teeuu?onOx)0(GOOyzmsQ^e+UV*6)8V!tts-)hz7RPWbSnTK=)qCK0Xi@~U? zw8G2JkJ?cON)1YXRKo9P1syjq1TIq~L8&;pkM>)5-yDH(A?4Gt0wR-vKeAEDwzz31 z{OkGJE+m%)SCdE6T+XzE;YHXbyr%>56{UU8Rs8N&@NY@G7i_`E|4e7xlS4aTth`2k z5MqQPz`|IQ*u}#gO7Jml)$q^gAWRDPW#?|>s_0a4lCr9FB;>oUS64Ex&Pj^!;?9#j zkXE;4Q#u~cY%2X(YcC+%QuVMZgkoNEMVuzUTgzF@KZ~l`x?8WgL+uqI3vQdsj){O8 z0EZZ0y+_L~A~SHa%*T2VCq7czY|l|U!l4$+j;*yBVUJXB1r62#zITDMBs$v zhz%|;z=B&3^FHN+O;(nz@_u6B*S<@AQ^UPCsLke5(s3Jf(zayRK{~FancfI0MuuL> zFDV{)yBXuXj;ypOr~yb%(L|ZgE2r&QLa8tVZbv~=mEDQR-{ueBPFRDkU34ou`11WDj0zaQuyJoYUr83=hi>sJVVXZZ8w zB8s6*gJVIo%4n>LsMIugs9}ZTEV8Esl`12LvnA*pQO5f6sKDRrZ;L9wM)W!cM^By) zGPb(8M=rt3&U3Rw-VxOVuuw1JHo(aRQ$DHc?ePEb^_Edpg(kV!@ z5$T3aw{(}3bcZ0_-Jo=LY`VMCcj5mz^_(-_kBd)xFz&VPIp_7euIV7E(D~D~Y6k_w zQSrw?TZ|j(5^<@q7Rl-|9cX7u`)yGZEy=HYE@3DMV1IkQ5~}?Fc614k|7HP{kXLdR zU8IR830~oi{L(=$llHM@_w!7b*dyKr)bWjDHez6by?%r^FGx)0t$XwL8o>!;WF^3L zc~YsPN0Lu8Sn$TA;k39cZK`a5kuRvk8q3#z8|m=^Twri+G_jx6z*6aPEF`oVT#U8o z1!)skjNIB3t732cfwBMA+ry2W3`M#QwdrB+ME@*6DMI@K8D!wo&jCilVko%>Mn9gG z_r^NU;|{77V|12*9Ix9a0J9bH?nf4b26Sd`;b`Ta3o!jf3;g1$xXg~?u3{-yJJNZh?`HEM zwT`)zac`ksFr$S43$H&B(Ye=(3&j7zN(EvrG0uc+F4TY$O!C z;vg>|O~7Nq=NYpS*lgGt9!wi&ihhzy%yeLQchF^lUSN2KybgCJ8x&2@vkbOl>?`Kf+`> zH?qNdW#7!6u*il)x0R8-XBE1;VyxKtv zr2l5KB(jbCOA4YFNqWsBDm1VEqN?F`#X00x^8o)vL)#x>{sO zkWWo-#3E~(_xkuc_s!)RiXr7V6!#DdL1h~>g^b-E5y-w7m{f71UoVr@9obbNvo<`4 z*|{xM5Id(zfe(0H=_=kN^P*^SaZ$emJ`ZM$%qys=V^fvGD(Xb2Q}>+ZOAVWF(u_vS zqaRot;iU8pQI*GIBfp9K1VrW$ptqol858iQ75aBm943=3gNr`GsW_mUZVfd)L6nyJ z5u#E8G&SFE3aw47wpq0UagCH-YC`&4cK3!KTrY2Q2u2({?`z86e1tp}#xz?8Wey6; z*@R5d!ncxZ_aL)q1-n*_*AhK_bFR{RNkapg-hV~>)mx?5`7 z);CgKEz@28>;dn~0Pll9QeHPST-jhmUP(Ih-_o5~M`0#^YVWZ6Kei^R0z;}^z?NBI z=TC38o9UdqzO7n=Z!Go%BTK`~g}{m^{^_wMVsd=Oh!}y>GsU={asEQib4uFPuIBg6 zL(cU62u@vg5|5KeWdWWYRpLulG1$_QBA0ZH{ zHKiRP#TD_J6MG3^QYUQEZIJ5ecu~ZuSBC2u0ZaS&u@X_SEe=n6UZdxLg@BhMkxB3y z+!-f2pYKIUK)r+U4$ncSUu7fy{B?f{3nMgb6m@_;X2oqOhDfM3+e5zGpAg86_}?Mo zpsK9e4?pB*RDY1SHO7#J)s{AN;cpPmfAe8qsIcx5o8FU$(REg>p?{xMk=lUR-2Lm! 
zW~{~AP20p1{8^`eI^^SJZd<_OanFzu->qd%ylNh&lRbR|J}o{V){)>=FwcafgxLTb zqoEwIzs|x5=2H$3{1rVm+`B+$HgShDV~og>A{eY3-WSG3x=|gyik*S}&zm`R_A&V) zUoA60ahgBXH#q*5L-fVt47o|5Iw=hW*a8+y@{z~Qw%R13Jo0(r&*~YNdDnr3nbLe{ z0Llb&zMMa5B-R_52a-{&OU?+n>WE5ez-MiNS*#yDPxcUF?Wz9!xw*~lK9WVeWT0?&C{#ZT_E8LJeTdoEF+EX^`8?<4()|+>eTZr0}>gt!| zckzN%jM{jt-9JKK&$IA<&NC+_(JgXm$K0a5!d7@1=*_&3(QcicTwKSN<`BJ{275us6a4E6_dVEB=xT2Rb9tjjBkXGopO9(DBM54NjB(-D|El`XpCa z(^HvikLB1Uy`8#OLc(prW8mm`l9QM5(A=)uOQbD_bMLEGQ(rZMax4+3u-J7)`ntW@I-nkEZ+QzUP+9JpIyEd(i) zKd2NNy*m?igu%M}(kO9`Shtu)JY2C~t9IZNl8e0KfTp6V7_vd@Zh8NzE(YZOFD2=g zv2rgFoNShTRO`xmQ^|d1 z->PpsSwLp9t8!GPQiQUsIZs6~_<3ij7l!hxnh5o`W#b(){i(Eb9{6wjsSYJf1Rp%3&RLd%P7|34!O7( z_d6&bFi;1L0Sz7Q>l+B2@b3-$3s<<%s4@?NOcSC7JYe@iZR zo~_KgmR<=5pEbOY*l{M9w6FH2!J+X9W~!-s<0)W2F?K&{K7W{g4IacMUEGe1uE9S% z`0fV%+AqGsi}(A_j`aBZHjmJ#ONr0|aS9OR#lNRns5>tSGwoPu^1TKMzJ78%7cAL$ z*D$4Y2q&-381o4upO^%YFh96h#kh}4+x;bUXsk11+&EBVB;*voV|=~CbQRf|r{mBX z7?~o^_@sY4|Kt_)u=ivGg{~qg*CG#HST@szM`od6&9|D0{VQPdSkAMpCi(16P{90! zxgiRO@UBk(BEx)%l-h@68be%}xFq0}!2tkkV@a|Wk6xw`t zaPfL3>Mi6A|D)bpy_$d6+p-RMYQBK{c!p$;cYeYdvZJp8xg-=|tb~>ueC#2kJxTad zxT0z)QrvysE9@xV&>7)c+F+CLcEU8t^CCl|lm4YP<4kzegXvKKVJ8BDP?`><*ngS@ z;Wo3#2;aaY$GPTmcnb6qqT`9>Jnk7Y*xDbSbIx%uPr_c)zz2u>P+9M;=FTBi5zK)n z|9={0xM7!{&azsNsQC)wM?@}>}q`M$Dz!&?5E%J1YqBOKb$2n@5yL`!}@`i zeygoqo^?cffT2wMP)qVob3Kr!Q*{VuX@a(K8tvf18t#M`;8Um+ z##$-e`^?N8uLH?O`o&zy1DSm>p^=mgltK-3YQ4lll$AeT1$<)G9UjF^mvTA=pU9&9 zmpBt&y8{JiA=VI32o-600o30t@LM@%& zDU?9>HkLk)^3go$c1UclX$7ZMHC{pKCzP2<3vQWy$W7F}7Mit@ux~D&+iCM;Qp+FW zWon*i3|KqZIdB}{YEUD}n8TPFBL=@V_j%Ll`T{~9{&u@26%qyicGsIbUUc|+cGKE_ zygXzghyDGK%*qYx!ylEWd?n^Au2M1%nuAM1(WukN>w%Q><9o9-S*4gS!jicxCPmcG zW_M?l)1Pq=g0$X+Yc1Ll3Bjgx$Ao=nohNX11;gYSXh#e!FNCkXF#EbyLO{{cN`|VE zMGMC*hX-f|&Q)&cNlkx=bZ2fG#hUg&UZn$0%sd8~7EbAx;GY+HKix!`-j^7kj^k$9 zl#4Fta_K%>H?}`QXr0t45qwj0S9svr>%Ym-z$Ui>)kGBlfNk+H=K*qz-|Lbk9~f!H zZN&@N7D4|HOfdMy!J26WjP2t*yYNm^f0ppplJ24x(NVsJD$<-v12RP?e`y|JEU(%% zrGW0QMva8EIdW;vKjxxMDl)o*><~C2(HopDj*Zr4+YiZnu4$m~Rg$^w>6fTM-#%kG;PS&fNz&zESK}Z*rHuK0H8OZp!u?Nr4 zZco#Q=HOv-uz8LREg9&xh<@>UN!aX5w)`Tj`0SwKSgX#hhsm8S?jy4#HWLU|naC_hoiS2;nR|rp=qh+bISk z_=I)}2AN44i8CK=zL#kZ?z6m|QUnn?C+z}vlw=P@ZPWzSw7(DNn2Y`bO>k4z0Eb&6 zNlDdR{J`bCjlZ<~g&;FK0g%Yi@S!9Rnfde=5?430mA#PVI7qcPU571`Vz3<)qKE#g zh*#S@L|{+p{vI?{BSB_or90~vU!xc?Qx{0;1xD9-FAq*YLBjFSQe6;66+^d+v@t3z zFlqNrMse$eSa@>R&Tx=KMhSbWux`>}DQc}ceYLx*#qXW>5lV7@r<62wFaWW(ven_d zUK9=oZAC|6ufKc~_bs~T+pF09nT?^fF@jNnrBocnQML!(KDy2q3$n*O)4VJ`l632K ziFsN^B*Bo%V1NDox$*ne(&usHM8OCU3%AbkqxP1Pb_enN9B_;vr%gH9SSld88bRza z;#KYVA~J4;44`PWvWKo-t%G~?AF2#zNp<9E70+PnI~GZe45;>MxRt+MnmN02s?w4} z>8A@+XP?#e8!zQp!m;ZhB zQqrIpRPHe=zO~_BlKSikCl8e*DcILMhS~!+4;Pe` z0GFri5rD#hcTf?%4E*H#9;a#bYp!o4xTB2T%HA{(JVmZ}aMdQxi(p{h@RUgCA`Cc0yyC_I@HI**y3A zRDKlEDxR0IKGr(-_}X&&ZU~!PJ}=U?tq_PK_#mJBJk&~lWvKOy^D}Q3TYT>$&Dyg0 z^xn7xVS4@Z0}wn6tgglbu32HR&{_FaA6(jZ=USfO51V!UJE&p`#zHF%EP(@!rSuv|Q) zN5I$AQk_jpS-%V6hRnw!FTSe#-20UTw?TQKl!5Z>rpCR|vAPe_TRGG(&$H(thbCEr zc$v251}w$jr#Y$yI8;Xe2De#+Di{nWig6vv3#~WKaB|vKBFu8{31YT@G?27~Dip8ckcoSbmNld&CMACxea-B)+E{)nr~a??;Tw3p7niyE@ z<#sh+a~%7Kvx9x=pnFF$BCtH=-&gso_FL~|#5@XNfM)dK_!ocQF>YOWWVTRJ8aBI; z{PQO0rcBc}ta!DNv5+}XK|cnkYI|eR@}|q5YIa6`cacgB67N}D_RTVYM{lESe8;{^ zdZJN?c)6@S$MpV%jbZfb@@Mq`ldA`C3jKqho&iy^rg`|(uf8c~#=#Hg3;4IWHXF3Y zulkWZ;Z7+@Ce8iX@yM4HB0(HtphY?oIi{`y+b^mtt=W>#U}xv1Mu&F%&zApM2Of}Q z{xuw{EZyWKZRP8qEi2=?gcUkM+k)0R_7xpe#$zg(Hzt_SO!wVn>^Il2L6f3vMUbU6D~TuL@Ccp+E@67Kn!~uEolzhS zGIeYUj)KSi(T9&U203~h#}~HtHE1b9jb)v06KGb_O5@3nxn&IR&!xsuD-;uR8@<2h zh**epvB9y<$huBPt!IR$Re#JNU2SS~p;P`=KXObmC&$8bb%@k_k^k{lX!pp(UCQ%F 
z)qSD;?_qm=P`dElf&9{tjf|duFca)kA_qIu0xoa27D<)~go=l7OIdB>&|B_oMLpSV zj779Q`{M6yh?SHbk4$*LVg+n9%lI(k`$ssM5qNKbMdq4SZA9Dpdqq<|(gk&NUyn)z zlIWZ0TnkD4&!!E$?d$0;BRAMgJ?iY^PKZ8yY9B8o%7AubaD^vaguMFzd6WD3U%z2J zGAoc=wr*hxn^kSz)Od4noN?FH|6914(?iiBoR(bje_eHWBHfL^zc&1BPUP#-QSa(s z{YX+KyGVcPo+U47hAbm}wymEaXN&cRV~?d3u>Hvp#zvS?Pv7FlK5aeS zxpiV;SjKR5nC$3^ZYsTLqj&iQY=0H>fmis+m?t#UJe^tX@wv5i3(|C|M%jt_5(($qzEd>mo$SBls!y<|M~<@<)Ycjr$J@hKb*p&_`r+&!?` zA3zc*C4H3Cc-Hs%BzUMCI-=ZlpKS@W%QT)tp(tf%OiO6Hrbwm>w!>}OpJ?e!{Qk(S z6eP~=bTN=~edNZ%>ZoxU&b%r?5g<4?-Z(U4wWpiJ<@B{?{#tqoMSa*I zGTOtJqk&Ig&uy^&ZLuM{a$q;s+_;>Iv95v9E{7ZM@y78g8hMnu)vR)17Luzq#S_{C zlKRE_AZQs}l$8TdveS%y4ysh(g|+sbh>~rXzxbeIPqbR&fi2hGeW}5=OFg3pk@K<=H5{hq9?s)1PM`xGvGZ()CA4yonvE=<)AK^ni#f zr@{|l>A9Xz{0F8gSHfx)PP5+MeV5b4!fN5!ljO>34zt@$-Z-q=yokUWOj~SBk*z!# ztu0$|hY2qsl$7K7HD2t6i@LpiO*CRD_zidO4r9_*zSg__la26xe^;k=eH{4lBq7W= zC?5H$Dcf<{9tZ3p9m(FIi+Cpy+uS$rJl2iLRPtGgbA@jvO0j}23U*uJHlS_E{PgPa zV+@I!f6|57@*R7gV{=>+W+siY`y!5M#AC1fWyMAUvr8)6%doO);i$#aV(Y{i@Z}-M z+&`C?$Le@V1}R)Xf8RO}!U;>DA`6t4LRDU`ehx9LA`?Vv(f{B{6~a1%$GhzJ1*k=s zzgLMB1dVS$fx9e(%7SufB(O4W!J|7XddW=(bQJYAD9~Lrt|otNx<9lQM_*}Z-hFbD zFY8Dsl^CF>V=(txUIZ%nxPy3~qBp`+q+eY#tiPcm+RW&@>b6ZQUa0R~i1kfxt}@B)R0(^K;&I4Gj_{!!o9UwbxGC9QqlFfBp{8ohzb{@dCu4L*XSJ^Z{^e~ zQ5Oq>LuqFYR)&mLyWG`0$V{bZmLMaW9M@ldHQMk`rCUx9IqA@R2E_QsYgWQqokMpFw@nFj70tQuezvCwAqoCWP~DO+1~ke#CS~B zc@OgxQ?IPdZ8J~3lGIaQRbaR-CG0fs~ZbK-Fy^996+}!Gy z8sR5W?iW8aebWMxGPUZ2AH);HzwuZNFhA}#%(Apli8F?CR%u9 zdQ-jKwwrZ7N2iKjOW&^~R9(aOwmVAN8%2wQ*W$dq3DKkpxQ@StL`JLGgZe*9Bx;{I z%zAl)t1lGWsRAxz*Q^V|iM%Gqt8vU!#dFF^Ssce}C)-YHZbe#@#|4^bE(jAjZbruvk%TWYu{iluAXKCy+w5&dgf-JccHah4Xb5=R1~ zT;OI59h*9{ETn;d^3d*b3DC2StRqfqv+8b7-=G*zzDSj^a??5fb9Z2Mq z*6$ve`D{3>JI^;}Oz!Qy9oa8Pei-ZnRD3>sWlmw#fdcPzDXz%sf`zhE+i^zQOLDlZ zoJJBP4j3fXTw46_tX{wj`Z^1gOrJgX+?dLF;bL!B&S&G(7bRc#oJ%Ab!yD(%EW(1H z>SiAfwrPXNv3YS8YJvqT{G!AZS+EmXu2OtmRLd92%jG7Sx)1!}GN=BWGi9+X1#e%& zSKI&GuRrrg__R@QjJ!Q?l-(RQm=6i$rI=OpdrwOFXJWTb8z*xwb}L)%j&^H9j5o^P z27NXhgEtu=^8-;mU&tkn6e|-ma~*HYKE8SB^uKQ!l<$QLs(kQ&M7qe;Fr|N0T6^s> zoIWFUZ(0fYh_^8!(hVIYK(jutk8C)gbY(z5Ngq!K1WZz@ zfv8At(ukmf8vJe3otH3M4M3OqNmK2(%*>5(H)sd#d-z4`i{FlILcjyNwIpz|4!|hT z9j=E{R@KXGar`?CcdwVy3t;kP zL@(O0>g)0jerEhh{>H$bS8*;g7dF%i?Kj*fFi)Es%_o~8ANg?t zgE#BR`7XA8mCk@~U#=f^q;KE0oO>L%dYN{CFsV?_(O3Y9Ua1e^!z^W`D}iBc2#%OCVs|MPc&y@X-tCjMyorV_5rT@D^4 z>1olKi6E!yj+J&=xH2ZKv1K2hc`OvSsn5!l4v+~BX?nZbZ|QU8kC8j5T1#&&0mE{X_kaJd!4S%iQQ z5Eq-gzA3)|cs@?r`(>8}9WLQxd04x`4@riT26Ps;dm)!6OPRGe*F?3Vle}Er-{gd$ zUyNC`e5iF^=Meu~mB1XrY~Xe+Kt+bi4K>1MY@)brmYLO>jGJsHpyijHgfQB;FcGL? 
zDcjngd_rY+`lnCYn`_vwxlC@TMMp)nv?b>5DkU1$@pg~Fiji0o^AzIR z2*zz*-bU)x8m)}-)n{RkQ02~le8;~tEJc#AS&P1oq$2$qktiIG#|VGY>4TR5Q=e#6 zZWo{wXY$8K!oQ{dl4h*(2UgR29lDfpuhfMJQ<2gpLP>;r_m-vdC^EP0jGl@b=7?og zPeMN5mfWc((hPxfG_rS1=YUU<*qrbQg5Y#k)<_rUp_tgrY_YRb?eTFBtu}dTL0%aB z;m}=%G z{MSa)5nY06Zw0zD)c0snb$oLlJ16o8%Y#dU7jAOUaaO*}%^m1-y6Oe@)!WK~!bVv~ z4qo6s{B-l{$jq@);)#A>D&e$pcMbZn4LPpn-DtreX)OQ)&GK+&w*`%alYg)tqR(4E zTOI(kWh{pW)U3c)v{~l~|Kn5rk`uH{>T!{ymUH7Z z%S+l#jdlftPtEbCMx4l{cAvS$yazYU2ke)g48j?%Yc2rNPZp3 zN@XM{jH3_$lq?>T-dnayxI=GQP46e7?gN3kkI^~btr*5c61}tf4l1+`^wEn4 z{%PX6DLt~w=HQ{Lx#=xkQTtCl?J>vGbWwTGr~1EUe#YpBV=Qj)PLnb}Vv0MsInfvd zeo5ge@)NQ>>$yfLbp^p!5Z7CDqhawNPCV=U8?t_hv=-9wYUama5tOSN-difQY=7)h zC$y)8tw_>;7-g_!E9IMZNV+kK3ari_TjwJPKJuM$@-o`|`Kehj7wN z#ojkhETJrayljR+tGf8#<-@zb82VAF+%-HObl+e_sBt%D9{D=>{iIt#F&R>8Uyna| z@vgeiDqsE5|FNXeP(rVv*5K^NCS*ezM5KTDQr-ShEfeVNh0{oSSZS~JSH@BZY^x&Q zk6nQ-A;DdysSCv{o+3rqH5UK7`bjK6cW}k|jVLSfb%DqA2;w46!jbj%Cr?c(T6@MB z*7kJ4bI8r3WK)WuUGn-)R3=GT0=iAk#cKbyx*-I6qPEOW$+zAcMb5eKBVV|fjZq5y zxk^GwK~pk|-5nGC?(u+8%4#i&PZ%Ubp6m!${mkP_nq>lJjt!+vld^Z@&g-yJIR)Q3 zT-l&r{gmVdekjxn6+JwUEC{6%DI?26|NXnM&X8b-$byB8Hnm2YLyYa$h(}Bo2_L1Q;bS*i_$U+nq zF%YpVD}K>5c30VEF^sSz7HX)nH*Nk)ofqEI(<7?z4%M7-h|vTMaD_(lJ}K$5J>~N^ zk_Ws^_X%c$y+genXgW7YS13YJBBiY_R5W!L6fB|LOLp$#A0442zueTJf6PRvtr%Km zjK2AFli^Q>OZjCsJ>ki+_S-!axXR#ECfenZ_8^ijX!n{w1@FhIXV2TaiF6|` zU~Io+#RZ~u>d>e3k}%juJd1Dna`%76jf_QC*oEuYQjjll@U=jGMKyep&h9>BhlQ^_ zB}*$73k$ND9!5B^RZ$gFi1mns)8;T5dyc_RMibFKZ?8Ye#jWo-P;xX|6W1!lDWEf+ z-b&!Nb35(%a~f?pZ-IjPa1iLXa+B6>uaQjul~75PYGUHu;8Sl{eE6%A?t9svi65~= zQx8i=&;IGz1ckU{xc%2V@M(`=X7h>Yu`p|H%fHl(Z!#4~v?ggZ0Kc#V0miwO^r>Xg zKSas<-EUcOJN)6lBOoL^NT0(OKgNNiTpn}(0GB0wu@}2yxJ16UHox6m@u*PBk4iNR zs{A!C4BlncLmoscR zGA*jo$~(Mftq|*s4@c;QhRgA(W{j!#W}D3WEFk+u%Iy}afADc(z`$COk5aQ032R4t!8~U~7nY!M}GbkB+I{&I)r@(vT4MPnYZ*4oxs-N8%GV^G&Idwh$ zFwWSHT$iu;&WZ=U`8qZA*!QObzncS(%=DWX4n~(H*U_bx0Sx@Xqng#170}~0Z!d}L zD;anmyau>VXr~Sa2w%I=?Ea#>e+ymXGbqW+(T9{-&yew(QD69ND=?bN;IfA7Xyfl; zElYhgclYGUpS~?tW(y$#V?cSB#$4>75fsL?GYbPI zxy%_3y_~T6CMKrxa{|sfUlH~!h!{NwT?s+%A`zo zN|ba1d1Pr*i146m0dluw-H87dm@;7Xt`9pW`c*i1c3$7g6lkG9&d_N2o0Ui|@?#WP z?QKb_Jgt_i$bTq3OKWVp?gE~{j~dFYO9huw$G&Kq<5)jj zICPB!+q9no+9WT zWH;9GO~Uz@xbEZutv>Kk#H@J;C+2BK=rbebHK_zzWuesH_dnGSX-^Ln2xy!^A)m!( z_|F_hy`HfZlZ_AcD%2KNY^-*slOc%8BRPDZaq1x_ze{N5XucppS)O{7rxmkFBa9d@ zIAa&!o{E>WpBDLfh<2wpu{cK64doJ_?k&$rKpl_2pXkOqs@}Cpb5n0UlaD&s!s{{{ zylvB=@8xYPj(+?+(jSnEVCvbg^v>L^S@7p+P;rUY3CND!_B5I-8O)8IeFsw|xgNJ^6^Gn~Teb^BFi1z8Nkv@IynHq?(tz?cY z9m+l_>kDBf`jTeF-au~C%_7D40e2gHyB{hR$D2`H?2A{xiq`Ykz5z2c7%3n#=?g(& z!S~m;$(yDrtGH`lZe>LBP8HadIt3_onL5H@8X`_wz5ATOK!NdU`@6<)BD->M7=frO zsj#6JUG3>M&)%l&Vsx6%)i)XH3c7jYe(anQ7FOEs{lMJztM#F+$4m8W%eOedQQg-k zai^7aypxhKUGYeSNln$HYcQ2Lvqou$`l9sIl+qtXRT? z)hJ6hnK>(p&C$^P{z2HzxAxW2A#7+P*aqe)*N9)bHJgo2a z!&yy{FCXiYw+?Qv_lKvlG?ABlK19ZUL(7I5u<<1^RL7O(z! z`)zP<=sF*iU2=++?$in2aMcF5SrrX^A?#D!8sL?1vN~L# zh{tOY{aS?Nywd2#t7r}_C$!C_7fUAUz`u)ytn1eL!l(lsWUrouDmcVHdD&w=Jmo4&V z@HM;ObdNo&hpGA%M^(I&Du(a9SV0J2o99GcFC3WxUw@PE4;lO7E0uK~E`<@Vorq_? 
zZ6Qin%%?H^#F9U54Z7U6H@HiAE*wczx%dSmnccv}Z(&R&HHmKQZhz_!Nk zRf(Z5*LqPS1mXWwIp!`TlRabY*wpRdYoOdRRRYf+F=*oRVdn5M5HVileT|$%IbddAnCniv?w30zRWtP`2mn52dYatbew2J*&Hvn^lsTbS%hqV< zcC42KTSU)Uw|95-Vf`IbA;m^T@4J@Nt$My8A;^_YU1pg<<&a8vRysnk%7Mf{}v+-5{hP?ntS3$sSc>3 zS9EmV-Z|zrgdX*W3Oig+;`fK0b~s)LV|(lHVWS@XjM@Ba8?BDW;x6by-Sa>E*Z&;C z00)mY-=fbLF&6z^bWn*4ade;U^g2MIQ~P+y$Tn97ZwjySHJW%*WxkU zsXV)G2(9p58Okj)kebrm^eM!~R4ZeDYV~;tN~%(*wSup$NsL_FVsw&PDeu^P=v#`o zSGXk)jz|+wQjBc`|JU6inD^{p`-GpN*!r9Cfdr>;$*!e+Z*{(dxi3SY45N`(be+~B z&F453Mm0N;wM5Yt>j6nxc9(!4Jzot37od3cBb4;Z zFI|rL{M1*1oUH5#;TzhsSH{l(Ux(!KWud9E{|wSQbO&vC<=p>5USGti7o9Qnzx{Xdc1) zMTP^m4|c=}k*2rk&_J@yd`)HOhN6`rdD_nQJ!Hmf^QRJJg1?&?>Y11k5A;+12|LN; zQ{y=gUCfjrV%D3U*Sr^lwE%@AgjL2v0hHw9O)m_N76W^lAZ9C)F3M>5=4?BQXpIUp zM?(ZpBUh0V*eoX;0c*~Hm(=bLI0kQ2^8Ib>##dE2g`TFlAxxXGI1j2l=Hm{8L~Rh{ zc4d(#G-05#vhbKZgPc{J_n`cFjUS2jLRs0IVrycO(+gNXm}^+>mg04j1|7PbIy8?H zhp<%!yLQH+1OZZlwrn*YASLJ}oyVUU|5u#Zqw>%2BPW7I+cYILM1_}UF>Op9m+66R z4D?%ZvDhBxsC|6HKf-2g4+{ zHYDz10K{Qo*|X#(+UGys_l2XPfgL4(AM!5WJwAe!C5~%7B`mSkE<1ntEjZ!f3~ydm zB5M+{1vodeCsq8(<;15(t%oL_a4(h~6u0>hSYj^|^|EgKDN25^FPTm5pgFPxp1~2a znSRe&`~GOR5z|kKuuc_eO9wOTxHX~!$aE=%_}K;XcuG5QX+nbNn5ng>n`*W?JMH{Y z$&#c?<^=jIqImT216lyk$w1cIN=)e9ZM>~Z_6Fex^|r-9Y(5iNOn)q{meXF_e_R zenLc;WOxMW7rb<4ij`icu+CZuK787`?OOKd*jkH9%K&lsPGn-)q!S?>C=*3E#ZEx& z|0B3gOE795pX0&;!kO3TUnF|J!7<2LKeGc5%H^EakV+3XX8^{W^wr*0I*h`{s9N?W zutPkC0#59wKAlPX!`d4)ej^o~+^vY2dUQS_lumR!ETs@=&oEoJDv@*d%)KUwU;mb?U7?XD6X784fo3ll2W)$aj!sq47)|FUBpeckO?P zXtrvgXahT_D9qnRUdG{N)hp+=UCV_B+aOx^`98X|?)?~TaAZx!uH#Bc0ajR)`~l3a z&X)K2+AHG#STTku=Or+oW!gXY7>VCnIkN=IxU5|I*oAu699)rrVe82?3%mS+qdA_x zNNm5@lJT+J@12k9X_{f3!|{ekhucJ2TN1QXoE@uOPTIfF!A+XLAc&Bz=0r;tH_p}5%QVjg(war~o>Vq#;s##63)y`BjR6-|oM~vLQ z;v9f4jL-w`6xE7BIE`+hE|I z0Oi9Z@t5AoaH>>LYaAa?VxLZLj7X4STo!Rl@=C28SVm*CmRu?#nSAD+9}(t(s8l4O zy)Bj(Hm~v88Ss$o+^w6mjjVn;F(st^$P|W%$x$M!#ycZ`_;+einF3+@&i6#lKC-Z} z(o+HJdjOCA>faL+T_Wz*%=eB<1cQ56-Ysc~-ytCROs~N+W~$uclSbq#?T;*yf?f07 z#6dhnQYh(MaAF_Op9t3GSfuKzCdb8_#)*{P+>8bJOqs|l2SM40iHV@FER;Ce_8EqA zFJ}I1%dbuG9%q$LoFxUqtSskz=6TeePplVozkxw;Kfez3=~}Lx{qd7{R&CKkmmnp| z$WAI(w@XAMl_>c>AutZXLm>%P$3BN{&^XUUDGxCt9sswV`hS*Jr@t>}#wTO=P7XW{ zs56}qnT08e$@cmwPxh8-mYqY1}8g1lgrJ;4^6{g)?*;xMcDf8l+dQ)T6lwT-upA%gnCJe18B~o_7Ch-o)mVOn|Aa zalp1h^4)_7@6lfe{)4?*Z!VkC(Vva~!j`(YT?eZ3@6RcK)FSPH(?B4#$imd?FCS64 ztq;PDT5jNSx5I?X-yRvIKyu%+yj7tZeEP`((}?410h#u_pF~czU2RjhP?fxu6cqzx zG+24QX|2a2VOG*P1MuFRu5RSbOCZxs5&O%v@s{Y>ocM+U+3-=W+TkIO=zg*_;vH!s z5%y0B7BnAf*@1PWEy`#7pN*J*j!CditK3D+qSbrT1`^w?>a9W}h)COKVjJxl$f15- zk4?P`p-r^Ze_OE{==bT=M2}k|11`&SWKLudTg*=>*=>`^vaAVsc$=|3cZ$$}J792n znH9ar8X-8pZyER78fwwe$QO|&9C~ZeS7>dI$0kww9X#nJ8?%EObV{H=tV>2x=`$~`C%(}w65=9B9}=dfnxWa)txG^Cg)AZB^@79lyLnoD%J z(&D`w^ot#6b@FHMSg!$Vl1XOpJS-XzjaW^SMBN4Wp-BcT2wP|c-fb&8ZX7!Fe)AbiX+c<^$+8op_0FvG7eBjzE6( z*PbN98yiMy;gz4)MP!^>rswTmiMY$z*l*W+_iB#Ab;^u-?_kju5MRx%Oz!_Z00=t& zXd{~^6-9Z3d-oAC&gH1zh=Y)kNZp=yoO;7}$*Ge@gQCMOk^JoXGV{hPYpqIkzNer{ z*~8*+{Vvl^!E!kiE`b3fztWBvBf4Fzz1*AONo%L~FxBUb!N*>}rJ#Djzt@Td$2zOQ z$erSd1?y0s|NFlXV12#8+B>egaX6E*FDY{7MP&fv*5e$$pEu{zC_EUBg-rDS&H#SG zr+$p^On;;_PRqV#Sg>;hO{A+6&`25)p`7Uog?e4o$9Q?SjeIo;o+{%){ zGqr!fRH*XapC?laWzmr&@07P83!_8F^*ffYCyOy@ERsv1et^1GUp450a4)OfHy0#i zBJQ)1=|u;7y@HxQ!B*TGQzReo4Rpe{PrL{m0S16PLUBsKe`*=FK{hc0zPPwh;yhlc z&g(6+?Ot*5m>DW}JLf8y*}&XdpgGj%jtBn-U=Kkzx&4i%~_Ih-QUYmg5V<0rj z`Q|%d$BV%Bn;~Z*|0%t5Wd@+j2oMMet8M;INcc}+OXMAw>o3BbZ(mY~wbO8)S1?wy zaG|WPiMsNK>^i7A5i_mne85G)%9;N&T*oc=v1j`3Wuf-iMMG!RZ|J^f*oPfKxPG++ z(&v(bN4Eym@rR^#6=rb}c2Y@H>25_8U4~ugRuh9p_iCL_VI-aD^ie&P9%g5;xq+*4 
zg9p>w{*3I10%o#_3K7)UXnQSR1pyD#mo@u{yNz!fn7jn)wTjks3OkG>ei!!anm6<8 z!x{#=R`+S-VMm_It&+cIar6`yG(mZ-CV*?1ug>$+|3}wbaK#xdTe}HPa1Rm)PSD`) z?ven(gFB78ySpT~ySux)JB_=$!}n(IGtQN9??33#W36{p%{l9-Qtbop!ik9kQ~Uow z&AqzJe$EIO`-u8Av4zFqkvE^4kB4##PF8xTIn6p=L)t*U@sq&xFZe z1oV4jM}f$pTpoc6kn#Uf>fdh1BE&ST@Tg$wG1b4jd)BNQvx-{Eo2@WBzy1`bGx)_L z=(Z4Z6a?9dDJkiyRnCS@BYCupWyJ^^(ex2U~jeFIoTQ-z12zn>FcLJ@SoNA_^pQ*wOS`xlcR@S-Ju`a_R;E~>j$ zG!evrPw!d)4f`dvZEdHx9#+H>VPdF9s&#?HXPd^7Z4N9@axL4mM{O8u(NIN{b!+x|K00 zKDG7x8&;W$#AMS^7=5NE=h5lXHmEL2O^Bbd7h6HHw~N($pqk>L5iMw&gHHcmq#)6jIRvl zs%PFonVOZR9FR?GC=qIWfJt83g*EzkPVieT*3)nr5I#U%NB_ z^R%b>taFF{>F+<||Es|qymGqenC_${c4_wR_k1^O_r^-iHTKItvQGKCbRv`{gQA@} zAwaT1--a|inB(=|-2V0pKs4*%j=QVc@?~5lS%PplIg>ev;Yx$@oiWv+!|r8+A*w!d zV-~ion*6Z>`=`&d@-5*n9Cr?o_x*$8I={yKT#}ceSJPDmxx4W(^|KZ0It5h5q$mhI zVBkWCD}R;cLlk~1Zs8Y4{DDLGA(tBaR}=3*aDj7d1Z=O6(~{~wBhSP%A54{LrBn_q z<+pP_7DSzY6gH%3V`IkI0|@FS)E6=Ut`i9;ZX^Kh!yS0tj^r`6pO1QG4iVczV$-P~ zTF{8JwNBG~oHe)Sz zB{gGk|D3fCkj$rs4c3Veh4H31b>OY6v6gGDU11BF5FC_2A>;6LS9Jj~89#Xr<{9eW zv$N%Lqn)=3YaRt zIqPD(XAy9-A$I?Bd6xSRyG8LV(qn{`E;bAqF=w(g#8AG%eF<5OnBfI8`B0ZL1Q+P%pwejQLj~5 zZ9S}}m1nLds8u{raaWV`i%-Qy07^xo(n#ri4=}rYSqRPj~+$Ds5Ff%eckynKHhvBH7+y$-^~M%qmmeu z5U=&NZ{D6pL%S3Lp14GCBL+v|zRZ5=F+W?QlsxuZt+nlc`U}ABlBDs407jF3fc|@I zTDwYy@%6F$?i5$cdf@nZ3-gIrn#p&r49zjCy9Qv_eNha*33Xrdj#g?&583!}rBopo zEZ_Lwb9vdHloalefS9H!#m9`GOqqK02|4sgohDK#T;qPjXmK?9^hRlwZJHNrlno(n zYgMLxg8GeRt_SCe!Gl4ipZgPQbVEKgf#ne2F93(EMIc)d@lMJ12I!f2K9@z4Ad3_|_(3I>p8bl6&W1VPEbR@@9b4 z*F(CO_=ZUkz`O@E$v@eOq^e%Qb&Ait3^;!DMhk?z=?U5e3PGdUTA-7IBPtxetGNYYN*YJk+xwlnUv9P0V(x z06+GoheI%%xExLXR9)~`QCTxqE05ngR2psZyWW}l1o@@gJ{9F^L zlQuMS7bRS%lx^6Ipwt1NbMfj&{-JY`V9FP9Ne~PF|Ayje?bAc2v_j&Wna-JjMMO*k zjSGCvpu$f!!kaQ4vxJ=WCpZvTg>g@Hbm(*V3xDFupdT(uyonSZ*igOv6i9o_t|F_y zU9&}a&zzsGFZ^zrWi#WQW^dzL9S6x(kC=1%aqp@RY{)c)&xa-GHXbHKvf%a)U zMe{XuixC`FUP-~ZxVW^WJ7U7bBX__YAm2FqQGkFn2E8deO1M>vY!I%7&ZA$fm)%#&^V-{FpwnKf5 z7wyPn;O)dpOs@{KTs$Y~%EFFvV&q(u`RU<~N8>UCVTvmyq3LY`hmA%vw)HWcYF@($ zemCUj#-G0zL2;5*ORGTS2s@&~5OZmUO;!*Q%-kXKf#c%6^WW zvK5#8?YXavZgr&`p&nl0*?&BhppHT85{oxfakse0sy#x1iw^n7l`HlMbT4?>JA7zqFv>1M6rjQuxwZY-zY8&@#zAy?yD>V3tyE z{b0U@B$bUQeI4`d*5><$te|)sxHSOCTBrKDXxla%<~Qk^rkNeGVHv76w#!C-^v`vM z9Y{yoZ=CH+F5ICj=n0@P<$H4)%Q{O1x4tB#`gGnRp16jet?+^Gz|5Yp+Yg0Ws@By3 zuiM&ml)^TWqqg*?&Y)8iV^`=A$b<(@jo*Toi+7fk&J#Ut+DsKjxZ59fj;N#I81MCU31^RG(!;X8a8`YdmWc+J-<@+hC z-q@HuGqRN*6mhSt7?rAU;2i^llNodp~6$<};GTO41nttLn6 z4wHmlz`{exkq@Uq^yP-~lU|>l2(&2Jz@>ksp7>!NPN9b?mF1}g2G3rEd~WeN7R7ky zV&UtApmg5gokI&p|cY``r4|Uf5yI3w@TF z#m}28=Fg_*@p;bsr#%G>AXBX{4)L7aV3O;~U6)ggN*YW_3azp!*3=B*?#24eiQrC3 zP1QlqswmtbOf>RjFyu|MrO7#C=Jjt#V5S)`l_v#lZjvfh^{?rV%lM-X z{R7f#0J>PAGEvb2Sso|S(dma{m8Paz&k{7@-BT5G$2G=$y|v}>6|*hwreF>~#GCW^ zFBN$rLSovW+Wrefh3!{M0I6z7a=XlHiNjCe|KgM|sBYpw6z_|`6~3!g8fU4zVMrA6 zti|?YrsZ~$^vEFzh~a!Wls!LVw#?cjC%lQ+*%zBYeHK1NyKVnl)@JAhC60cCeYzC5 zAbP7upF@qL;O2B!Hy1=WjZkD`aI|oYh%%{7deu-gQY z`-tV+FxdAb<2|5?fu7#xP0!-Zlt^?jyHOSNI3{R|Z^81Ve5M`0C4%#nk(>L+dzi!2 z^cA$`AP>B?+C3D&Au`+VdegqWT9q5jKwN|-`41<71Tyr99f=_NFn63jv5FhbD zQl|E0Z<^Aj?Q2S4bBo{1!vKy7%uHnQx#w|+`Hj}&Uor!w1(Fr#1X1%sjiL^Dry29vs`T}g zn3Ll?aI^mP2<_T!%rlAZ6JMjI;TOUaq@PL-Sk9@v_>yv%FLAm7bqF@w>zXxYD*;*=&A0UJ^4z2(ji(Quon z{pdWnsZdWVKop0`xLW7<0iI*bx-Atdl~1jWTCQGFt?Y)bm$`&8&&5cpxKx8vV)#`~ z*KRql8T{>)DSyA^v6EXAT{}^SlQL{^=Y?e*2K>DtC*2OGba`O_hSz9k@*88voJfL= z_{YIIaR+;U59jQV#x0~Xo;d9aY$EbC>Zy7P05j$`a%)6EK##cYDhuk~>83v?DE9mD zlNar>VIfk>rXm`y3w0R1nVBQxs)6(JS_93j;^erejo9}D&ON6717FOKiqJS@)8n7$ zkOJ4mZ-S+?pXg)Z1Yk)9xD(<-lSSRRW&g%-f=y!`Ci#)AFEYwLkRZ$*#Ywnsa26Dx zmZ&|W*TtMejR*OU3H{8~;r6-K4gdZ#rM!$Xr{?vyWT6t24iA+}t_5ohEDFlC7@g95 
zQ}!JtrPtDY-ZG0@PtP0iRmm^Fm0&YQDm9e!O3Flk3Lq%Vukb@ z`U0A5(e*X0lPI1ws`K&4q6@SSTD2jXV1g^Aft||`M?BH;m43gO#es=)Kg087L2;5i zK|d7NiKdXAzZNf>$!=VHIGpmUibd?mPYZ4Fu}%yq@;YR6I>DxehPg!eYOFEhIr0|w z<oJc9K{0y56D1+$3*~>FU_x2vMksJ?i?-VuhD)Y}}shx8xIv=?wQL66x#u8aflh zhR69F2iez&l;!09yiOTsN}5KmCj{{-oR!j$^R_wrrlmzj;jd4%nKwCa3%)le#d>hLMarv+ zG`5;%s*44Iuf_RI6h3ViGx)H;4yMQB!+ch-3EJ4=siKX<2HuTsbzdsdcxSJaFZ>yP z5^z6gRoeU{sR=}uFlh-~kX1une+xV--K?GDM*G;jPu)qH-?!kJCw;y!eiF}HvxUae z_iRlN?W^PQO0utGEhkjWjlW()rPvz5W_OZL9I(9;yU&Bcn=2vSYmebdZL%TCbw1jl zP8sHGq{NaZxT$cXvTKapo|)ZB9H|M5(xtB$Y1FucgDcE?MrOzyzzro@ zDB9gN+C6i$9Qk@h;6?`=fS!sbr-I7_!>gXeKiN&#j^AJ)2#d zGc&CXzu+RmZA4J?ozkNJ+u^#(xDIVRZd;S^_jVrE5O*^28C)GbP%tj>y2ngw>zFTM zLLOE@=`EI~-EU?;u4_>wG52&n%R9N-5s0dET(}U03=5`;jc^W^-}1tX#od}PSDyTLPD1>reEv;U(PlB6i5h&XA zD_t7eKN6i(7H=hBB5J_6Nu!dX!LZ9`Lay0Vnxzyr)j42?9O0-T2g-YP4dT*b4!des zND`}?${;~w#9p`Tj=);2!#?!@H1lP!=;H$M6o-OV%V{`zzK=z^3E$BM9%bXIMG+<7 zHl?Xc%VWhE$E%|$kn+}tXBii48W9F@xpJFpU5h__z4bh+8_z;bLJo}~0oTd1;WYNfR zX1Zl~A(}t{Fq>!T6b$1)K&vJ}d11=V@pkl6ckc_|x1XSz*5G zK_lZ7KF`64v&!0mioHUYf}v#*R}zS&aag@1L>3>};F%Hn5ZQ*OZoBIEiLB8lyJIE) zKG-V4Xy;>Ya#q5dRWR`bm1bv$ukKY|pwn+1-}5Iio%*Jrf4UPt*ra~OCPIinMI)xW zjJuR91-hk-^H9mIJZTf#Cdw6`vY}^?LZ2#n5{fTM=mR~&(nq?UddsD$qmXRxpM0w= z?V*R8pZeq4JEMBve=MC%soby&R?VuT)F_Fu0LIL4Z{$b&L)AWp%D@eGD>2wM5dhRD~0FdNNwXt?sV=T4eW6@JUHjUW8%@liHS$&YlTeUbex*B=qda?rb_?TfY zKKE+oguj-y4Y+|{hnX!66IIVOB_K{n+m4*TkL9?pLA?6E4xRvhkmaAp&E zTSO=}UYHl=*St=eCzpiul++rh)$TN(JC_p1!DW`K68piYdi*<)5}M0CN4u%B!9SP z61rB|+QiGqoEPu^75k!GEJB!fTJHE`Xv7YTD^uN|(BKot=h9?b%PvlGoCuW^?8>!& z0k!JALQn6&bpSJ>DN?t0iV!eR%Su!vxu`IIjWRR{zLgi_c)!_8dg>xV0eJXW|$j8|D%0d~p5^R!YDK%o1Z{vc5-ON97 zJ{D7&ca;pjY=~znS$Y3hWbnDAAZP=-){6hJrTfA1kKyGi>%5Tu^1uMt@@-FXJUy*4 z%)rnc%=}uO>hZi4aq7F7Y9_X&NJw7M9%GDNYTL9v#Pql^SrneSfEeCn{$*&+-@rb! 
zn1RHTTMmhchz2PlIM%bfpeIhk>o0ak1$MqMwp22>dx}qS@W<;)UYY5%rHnO$K$>P9 z#PU5vK*?Fl$has+8!-cvqx)E|(d?bh4#ecyBXuyfV-lj!y#0L|LHo{|!0B`RYaWL6 zwMhzQF6>AT*x66p*9W#^MSHo0=T0`6c4EFwLbcyGZofKUn||7(^)mC101efQ8V!%^ z^UZWQF5S#yL;ezL~oAh7*sfw|6=>+4^q*^=1A~V)K zSwNlxb<7372tdvAEDt7WZpK|rpxmXK#eIE!tJ>V- zRmN*B@rHEGh=6Zd%BArn!h2h|%0}q^`1f-0F*RzpE-FcNLlE@Y?f9`?RxNIng)(`P zONcF8bFv`mX&cCU-`Z8}Aj)5h%KVIg9f|P#TKna3LvWv?oPfj}Z!xl2oggc-i|C_Q zD`jHeOpti^h;Er859w^MR|*k-2lSBVhauc)fjig7$4>r%Q%C`$(v5(yly3FDDfPxb z9|=y??~Wl2Oph5}yVIiSyow6)PM0ob5GIGGW~}X1M7}``KBm&SUR!Q;nc{O|fBIzh z2RrTtGL!|6rg$f#v$M&?{L&W*m&o<1{PVwl`4?4f#ICPUMxAO=**hA)QqA99dD8bT zV*Ni%KAjv*>gQ56_P#JWaGnH|fqer(qHw_qd{c2wIl4l!$z@Qv(blt?M=p=Jx)|QyoMGo0aalckT9FkvVOs{5 zCy0I7wRdn9wLDD!?y-Wp>bZ=w?`Zb7q~s*~)3BdaOuh1LErzaava3|Y3?r_tvAq6q z$)%DPpwlMcP_$uo&$P1@RI~k}XR8`$UV=wSmY?vvcKbDBQ9jwh9{xRZ3#F?Ma7cLQ zc!V^O%_FvOv&3FjULHIQxo}Fm&=uMJ@Y5M)0rY>qh@IO{FYjrhP{ju1%14vU8#FQ6_&1sQMRm4eT|9*H?V$BYvY5qUMk zs`JhXuF+xpB*#mV1}?;FU}NAA{`VcZvZ?*QHPS0553dg28|WfD z*V&jD+VGya{)lW#_O)+lqS_92YSX(2@<|$H5=d+)#*=<}^?Esqar#9G!K-MdA7}a` zTl(nZX^}}v`ar0t6m8Uj5eq37f(L74wgi1(9>vmH(*I|-ebhsO0VlKLuI`jsz)hc} zz5L$~Zey@x(AX^ifU+SN8g;3OMB%7Qoun>E2*prBT$jiigD3|NB(AN1Oaatxo+^06 z)I#cd!2kQ7)^t7Gxof1-$3-sALvQk9MH)<<#(b6h?KHK({elL&>l10|apO)J!4gNtQ8i!%|xQE0USVt!s+h<%Fs-`U)?08dP%6EZOO?!;6YYaeQ8_2 z0q06K6^GC(aJ2^$f~Y**3lgI;P$EzyG^2ipk6N5^f?|Lvo6sRg6xAgnLAPc zsowWyjGhmX+2Dg*?y6viy3hQ@lrE~)H%V6o zaPXv$4x%B5!fxZ9wgAy0iev8crEW*?S60r?cVTd6wE)L#`~P}d;_{W zN6q^*9_^P>#@OtP#=4nF)EQg(HHn;wz;6t{ zj1qVc((p_wzY?`LnSGUgUXzOu`}|cjdMRON?h8;~R9ML5^%rpdsV%}pvZD{F+@M4n zCztLxTddiW@wjbiU|fazV(5pYoe(*d;25wmFE|5&)L0f7>q{<#!Wecc;w; z@A)(lJ`ldIU37|e*Y|~w_M5>h%j1UU_Xsz{V-5nc<$M&|ImIb@cS%oMAMf#J6Ax0K z0)t?eg=?LW0%61Q+~Y9t(q_vXX58Z1*LhfvA4r^2|EF0}!NbEO_Yf+C$uWQu{YB_r9|Y4rYVnfoafdSiF+9xPnnLF zqL|M9K=b$r{#wyHAvZV`L3Nnw1nNR@SL&antJzm*yu%*Qe>=rn-A7aiYWn8#W;^FFwMDb(t zg8^k_W>|PTyOcbXNhsu@;qg)@o?`d`X!Trtxl&WOHTaPIlLWGfLeU#HfQoao&d_eU2HoJ)_rFbYnx- zD`q=)pOQJf{j(f9o+GUp^fgm@^y_Vxz-KViAmjw$Jr7tsxH#nH#_vde<|Zyt?Invu zsyqiOf2h~yZf{I~7N79WxK+wvLv!a`b>+xfG!Ssai3x_9BW;Hr*Z|h3o!-O4FEta9 z_!@t}Z_IjQWAw~Ol1dXkH=pnkauB|3ot48Fuu<;gEkMKap_`sB>{5?Z(a26%d%l!9 z2<>o$*~$H0d6pi1cUJ#bq&~-^us_kQnI2S){;vnoZ0xizqD(l#8ns<>aCs&p_X+M+ zm&e{qsXBFDSXTSP#3kb8G{bre%-=W=?fNX6IsY+Q#-tYOVQaGJS{{Z)SSBFdA^Oa` zTb^!Cke1nn_0j9a5iu+%KqNRwA@Lw>>c53h$Mc}t&a8VYQy=AvdQ%YO{rIZy@`Rs} z(jFP+qXFETQckXSi(bpo}h@MxJS;TYUd~dtIdsm}f z-@nn-FAl|sk)CinQAJbysgLUEXXddDD=9fHq5;wXn9&B+rrk5asaQ;MzFN(rb9<{0 z2vP{@j*i_;7Y_PGsQrM&A=XK>9s)?2S!jABx#Ye~0zmWe_ww3-$W=Q`_QkxqA=`C| zJNS2~pLjsq&fk~pBdQ3$D|(mo+y_MQLzZ2S0*)m187In0-`Jna;?H?DSG#LjW;T`} z(MSDt*+a<4xBl$Q7`#0bFK3^|$_tGTWP_`{n-+~rW)4aE>peMMi5@d1A4uCds3`Da zBZ5B#7Fc>N6&ghb(k0ffnjgo(gto^;SbvT9;~~f{9OTrw5e6By-CN4>7#)np8PDoz z?2LB|!r0zl2g|9sH{%IOgjqT2H(^g*$cxvBN*&}^LlY85^C0BKg;YEgPN+J(N!(nFq=|#Ad z=#pN1%c!``N}dJh_V1-&dLYvqz~0(;b@oP*ibhglPx5JdTmP_nvQ7`_1WpEw$joH0q(wtLOQ6qM76wW&0?T!%@m z1gCc%!amuoAORb8t)}m%8jWK7geyGLU-~>{>f84z=Oyfg;|?+MhzHkRM6Fo_PDd29 zyO{7Y*VwXS26oKsALiLA_qf+54d*__-tTk0{QHC9*}$t_eFYIkZEAzDNNWsl2g!JjutJxEa1%;c)Be2u4HYacBq0DGE8I>C86KvdUve80xgj8_l0OD(L{*uW%cA_G@M~b2I9H@(+FUM z+CtiudtOU%iI%*+#aoHh)O|DHbU zy1mw#Q=*e#PFS+bimn*)g_`8tJ1uDp2cxuOZ|4Or)w4b$iDU0r%{Sv)Q|cdi4Yh{&qPrY9&GEfu56bX6(ZSAFmo?x;zNL-5 zpTQWh5aCiH@Cl?b+-arp_h+fbiwN!gE}*(0fKJv3Y=`Qbt@iQ<`#iB=k(%yc(V=+t zXuih5;t$)dy?{#@sKsEUOu7EWKG3|}AHFlhFQUj8OVjB1mOuKyVlC!%zPJ^<<8G=D&Z!a zwU|9TomUT&mj{i_E7kI}#%QF+h>5ZhJkE6Dw*7EKq6wE*yaf!UcA)H;_}`ccC0NIJ zz-wxoi@ZLvLK3#t$o$rpy-BPr@@5%OPX53uZ3fQs@0CwET_0Pyamn!=HYN@`7-QM7 zb2l+TG_?JeeOCGNW8Z+E*;QXN_=6~>$7M1v&MD-1z0{Sod{sEuTh$bnFqg3Q%F6CF 
zIvpIH3MxdljwJ{`QnnuOJKLKyfwjx0laDLY11W(?fpX1nS4vndPcZ6;<&Vtp68_vW zNKM`1`$&pN9x!Oz7H>El1 zZmocSu_{TeEzokxZ>b8iT;8@XG(OduG*}_-zBZ5LfFPPEBN>yGYx8HoRfKnq`J8DW z_+C)`kmeU5OVZBp*U7J&fBfkhh;u3Hn}}yFiA$pu_5OX@m>DPk<~J!j;}J*29w1Nz zBGq2zRG>*9?wM>7{Q73Mwib9SGedWOJ!M?m=B1TAx>QUKUxsA+a-vzbl#^pFYo>q zomg8Kwbsg=lXimObS043zz5AvG;JDh7Bs3l(PF=Hq{kRQ!o{g^U?p@hjx?QJD28N* z#`X;jg@r?U8)C>3&NK-@PZkqK!d&V+4sZ1`(`!xERF=%FdnVU?8JSUZB>f_!gU{Ij zT{#6vFJVTz>izuuzcO1?PJmc&XmsM|;Sc2Cb)$iER<~!O+?Xeo+*EtG8!=m~-5@!$ z_>DVZMutm@rS2H80qUd*x~7`%a!1|H{(FeMutsGk z>{)xe)Ol@EZznDl(f5;lqed_&Y}ZM06skqp3m^M*z<#cR8Y(VEt9_@ln%aNl`+kd8 zf57}EhKbg+E}8Ii*n6vrQKJRe^0|(rqE_+yFH!q~3KXG8B-@>p@UI%%`*ajt|2y$s zh+&$CZNd8hrewF;n7M*5C#g{y^}Qo=h41%WekB!EMj@q~CL=eq@c(olVe3mkc9jRh z2spZQih%6e+c6fEaqiy)$;7ax`YWldqL|YB(kog-JG7-)PWoj}fcXkaBP1vw8dz}3 zEd}H0DKF2W(YorSQMy_VK!a)g&{Y&PFtZ{Gsgl+WL?TuKH|i;dT-s)B#UahKb_Nkp zc1$?|+PkK%>^(5YwI?k;$WD({E<6Wd&;$2Ll&%QV^UWf8(oL3_(e}sWoLMXyQr3H# zFYDDXaSq@hsoyvnUH=O7b~dL+L-*xg0CyXe{-#*)CPpsCNL7+^jx4r#}~#i34d9&nj&fsvAr7_OXMhf z>w32P$x91x`ZNw}0*5zdMAS}^&aOVj9*L`DEZ1GyOof^1>2LWHRb^H_;7Beo-=L|Zi;XPBf8!nv7F%O$1z5>Og6Lr#c70a zGyVTLb+?E+=~JGrWNGZ5j89tgVSwAV|8s z7`!6avYrukidW2>6LD|pud0x=$#V5SK%KsyaRqrhbr6Y*{6wn3|8rA|$P!qQwW{ND zQ~vWFyW7}3eLpm}_-PAC?*ngpIDHJCG%r@c%oKGfo8j81Kcw+--TEqFzqX=79Y-T1 zj6BuLUGgt1ScD){WL?^>|47p1y1*C;|$KB!s}H!ulA8}U3cFa#Rhel1{5@oV?(EZXX@C@ zXOq_1(ku^UoI_Of_@*cHWm$sZkT=obqIP@dcw{?!Rc#=FK(f!@w3yit$T!OQJHueU zP?sOpLQc-~#IN(@2HCWNW}==F2@;d#-y_FWMXL=&YACwZNt|J*y56{0-w+t>v>{eE zJDaA$?y#9b^+zJ*M;iwL1tSUg0+zX^2r&(D^sUbLqf`3#n<@Ju-4MNP?~^j*h*T@M$!ft zTV+9dAUM7gQh}iFN*ekm!K*Vs83GQ7`M7HP)wpxMf*JKGwX*UAxPUypf~FP0fm z9_T{7`O`6awhaGu&8a}w+>UX=092QzQ_#H2sUX>}8pgY4rc-mfPmKmV9421Tlu70(llC=iCUM@5MZpW< zjRpX zbLN#Q`#`6*SyTy=GAok8#4L%zij=fHo2;2#enp8+YN=)pr<3#Q9MIn~r@DLhU%B9z(GtM=~6r%%*04*c5bP zGAWj6e5iGD(x3_n(=`a>)Oa=!ya~N`Q|>3bLyAxdM0Y zfFW*cUy@ORYhI*851E`x>%7%(uvzna99IoMZ>un|N#A2dNMn~3Rv|Oy_upELy~2?v zXLPUp4K}8j9gC&;eSk;cVAI-tZ#0fF>A7(GcE0}Gsq8Ucr>pQa@>nkno9gSVV2=(v zu|Fqh<6;vF#<=5Llcz{Q>)*i;tv}8c&*rmWD6T?nQRpG4WaWzTnEXAWU4b$_2%{5$ z$nYg2djJc~!%cRS@SPeqB9Mu%Dy}Q-yH^@!M}{kIwgWx?d_&v&O$v&&22LH#H!FP3 zuu99Ll>4mO0KOuglQ)`VQsh|eBW$ z%*}1weoJ9nYD#}ef)ILC34Jbq4f@QSYKr4bZ6owWZAa?fUiOf{?EI51yZ$qUvMzl~ z!nr}`PgQensmq-o^n^s>_+(MPX?4wcJk_pvhh`=O$)xdQ3)y9g`s)HGX zIjq#fBksi$yMMD4tE;lXrxv_T`u_|{8=^t&A z^+19q!Q4P3tmWHM35>F$N3zl~f(uYAb=tID4Efz0|FrT1O%Ed1(w%+X#$a&Ar^ks7AXq zBs_)sR4v(>R5KVUpU$tQrxdZOnbi8X05q(?T_VAo7SDtc8N%ziFAa3SqHHC4naqr(_Z|m|3U4f+8-Mg_7_u);23g<<|8XF z(%=0TD&mM7{EAR!Isls9C&qk4Ag5YrMNh$}R=Ozj3l@w?N&aSWbk`S@r-e0xl!#6B z!>d}lfvdi8l3(@8t!-el*B!w+{Cg}P)|pJlj(|io>f$#AAU5$I8x*K6J@g9yYlC+1 zzvSS#y3o}2vt~yaJZ%y&_5Q7q?0R)?)26DnOBEqoK2w>j@V6IRkqRys`Dm5@t#Lo| z$VyYX1ZB198T75BokHsoK3j`ev5u z`~eY0Dsbsu$?T_Di3bW|!geaj)yMCK8p`sTc;$dJ@PCs-RQg$GfbK|p`ws1^Pb_XmSc zQrOR-11O|+xQ7)Ryu%=Cm5fcoKC(R2FIiZ;{k$RBv^jTUh z{^-^yvPn+HV?yREnWXjk#qom+%}e?=rzarGh>4$zeF42-Y-u~iT`t~Y&nV}a{aGz~ zd0!ePYZU41>?yY=?BFtBYrsbDp%JiQ1pI6K&vDaW{!K)@wlR3v@3Gd)m6r9#(LgMF ztMNFSxB98_cWXA{+DD$mx_YiQIWy$EhAM@4RdtRz+MRxx$iCpwDkn4FmEK6M#PFCv ziH?!XtQh-ALNN{W4p&+^%iM_-p zdCE}F)|F&+v8GT19~3+smqPlKyj(4t$TgGCnVDT*K6*XNA%>-(grQ<&(W1p~O4fFa zMXVQqb-ri28`_u~Rv0r3KT9MuMd{+ajLqv)R*cXtgAf#AX2-QC^YJvh8L z1PdPAo#5_nnNGet_s*=De*o|5K3%73@2A|M-UmdXeRQWp@rAyhp^xlfsQcx^<8C_u zn=v_qux0joS(zI5a>i1mNHAT}g5WKJY|bRkT4UqTZN$^v&?Z*#VY*b!Sr^N$EHHXk zn@mBco?8&hsnSJ8S?xu_nfS5rUFQGK-)n_33Cr=XZ=zV(#v$e9b&XUd&M6y_j{ZSs^?Ao&?Q$(naAK4?z zHMA(c?sx6_kdVgYNKO4Uz19j-B;7a(zlq23VF*nCJihO{uivJbIGUQ#eHm6tyezV=6Y5UTJ9y9hYZzdFnmA{}SBY8z4^r2$u;y1}8~O zi|pSGE)O6K+dMbaY-^&_9IwnVUO-&$E)`cxp 
ze8ev2Q`@(9Mp3A#}v6#;6KaGyXo?=I#rG$*TYJ z)f7U%s#*>p$(+nxJ2cKr!o6Ud zRlk?k(JcmIzb61;TC28&bG*ORAQK{knOBb%o?>+?3gB7Mcm^APJu*Ly>u8Jj8*?rG zg$%M+<^jk++Q}ijL5tv<@Bsc_d-d9(VG0%%xDs`<34Xk50U!g0;Q_B~%G_FxE;07R zus@wmW<H8ehTp#Y5||cqNl3Q{x=-}ERG=dZ>v+R!o21m>axtDy>9Oc zI;3{K5mH{u(_rx zH@SLhXisrXi}$Y!mqXHLeN|lkY@bFQB64C7e$6dfQ{!;W%|PG?gCKFKBO?^c z2m5y#gQ%-Prd;%z8=;-mJNpYw7RqwRsM$-rB*xQO^Pr9Bw@jpXi^5P{qi^wN1?7QSp+%@sCIzbI#5VkVlKlzFI% zM}qaD7UPhZ3*)#j1Zx1JU|d!#2oP>z0pa${&S~d>t?ns}dc7rNTpii>61P|hDTkrE%OUB#{OJF(fWAt^Lr5YI4@WBioMM)onSM!(7wF`Q zFZK3%3|d6e>Q&l#50}hql*YAmb+*Yyx0R{?6yg7#6~#m~2rAE^=7#&_5DPJhDZZ@R zbYrJQS?T1ehAAM8;&4I^&=l-z$l%@#1e}g`Y^+>m|ArvmuXn``>SlPkD{W)Gt`j1a`Ar$j#=(J&4R7H6 zicgRQW!5P|-!qGqPQgDGFKroX>L9Gg)!6^REX7yV3-;xR<4mA$3!JtFMTY=jSVu(3 zg2kot9!D<8=Qb$^Cr_r{IP{y#>6?njve+J}dVQreuil<6UnLfO8P>5T9)?P5R-xs& zT=~HNuzz7vEeg0c*RXkE0~M(@lSUp}Z~80?0t$8`DO_sBd8SYFa8RlGEF!GQ zMB8wIZ^-Xuuc7Vq#m_od-@a}Ixp_FG3y#4|x`uG9>;@`9;r7a7jQlEc+_|xv?c+Gh9Opi;~DId5jl2#~a zYZ?YgGlu=7DVwgNQ!GqOcKsBiXj5^99!^&%$O z=DT!d7gW0IeNZ4l_c8IJqc5 z%pS8X8stF_%MVS$v9`)SSByK>K!N8mam|f%aI*X(vASkk7d6+s=HY@@l1*EQK6439 z;shkd{Oyl`7WLmsEtR90HQJ6H+xEi{LsT-i=g^8@?h@5_8^_bZ`#ucel&}!K?Lje{2hI{DDo5tg;3%f&?o;SKFNP6*xh!zc~x`#{CaESp*YQr z>wCZN;lyuj9ye!$buXQ}CbXKNPlp7!a+!cD2W&kTibb|&R|CXP`w#@h^trM(F?Gk;t9SM;AwI~JtZ^U;`}tQ44{H~s z17g~Sb`5ziRpPq(Vhyoh1jb9PL3_3@J+~0@7 zCH8&@q+k9YVwHxZH*05>n(^RhQ@TEy*XfFrFacV=+^QY;U}V%bjuXNB$N%g!(j@e8 z|5-<{fcKM4H0GgY`Rc_9XVq@+w}m|2Y9dD)k)%fkzZWEi`e(^h0{qW(2& zPd;c9k?Ml#dYvIfnj~HR7O;E-+g5WL#LRhA?TK z{&GRw+}Yw{Hi57si^3grW^SPCaPqUR73ezT16_xFgv*G#KNFF8cEOsR`6IXL0>afp zvtVzRMZ1OW52i|N>jMvJj~<$vh1}f>Im*-gAz7zaIYX8z8!W_tI>FAUDiK2dLnqC($ZN%at4K#j$frAWb!kyNwr zL(Khyng4(nS+4@Y^w`NQ<)=IF(SdGmn6AB1ZX>$6gNJv<*cE8`)e5PkjNYo!QMvyjC#Q< zu86am_fO32+D`W8&Uunkb9^KbBGcSMx4y2*(?Yu|2+3q`%A}dE;5{%e0ZzI-s0CV* z6WlJI{KR#DX~X`?|@!|!NI$)~XBm*i$H zfYkkJS;kKKZ4pf2!%`6n&mgVxhg|uWW2TAt=yIj+XSg8ve0NcMK*Pr(7NR%gM>2}p zz|Dbt9xHzI5d8F(+t$7PoQZyGjL&=hAt{Qt;F-q#F7kO-_Zp#z!>I&}jzr&xF4iX| z_c{mSfVcR5VFHQFUv>CROt|^Ua{OYzqUj+b^YNfHBJjO&J3wplu@3eF>L`^bEB8N6{6!v?YpzD~blvy68{`MV zA`jked=nZ)=$;Lllxc1~Fam1!v$yN7&UCF0NT#WHSgzVwX$!6fpXGPucr3GEcXctF zkfm|o*?_8Dzb3ytlx=%x_9X-sQP9YSUFZd6!dgIzO8F~{nYm(f^(m9fPmzc7B6-i@ zpJg@`d9`{%^s8kaQCxmg)GzVOQZt~nx@r;R9r%3iovq{lD+r|r{p3#$rqHJQr#)u} zz3$E=2jZF`77Lp?xF)K<3$`2*@Fu&>$8rLqS>Hw*&e{lcHCDzUdb-T%y}0=z~dDMtPI(%2d8&~A47f%h>YQM{Nu)ortLZ|T%&ASxRvDlll}Gx2)TM}tSm?u=paJ~hUy{%J)&M7W5i4zX7tsdK z*5<~Mc5qDvch-YS`-v&G9h)S+VC`5L`ooVn;^Np%Dy)xfVMkmmSoZCT&k8~X&=-f& zh$4g%JX?kCcQ#`7g5)j2J*$p)rB=S;4D1L^$a6nBRAxADtX*Pp5|HA_7+=UYWWns* zfX-xuDEZ>cLK%a-g@#h3fx=!SmtGt@qa0Q3E`KqqDb4L6B1H5dkRoyXpDH4_oIjf* z5I}?fUgI7znVsLr1;T1p8g1qE7STs?^PIMhpA@kP6^|Be*1A;EbZESO$E^=!%}Z~N zyGhIEl5++&5*2_z?t5At=QaD(B=_N_^bb~JX^@?(zKnok?FpBhX;rN3J+Cn&zeA$y zlftdX%YR=OQSpeTL12GUw`7m`;X}B1*0H^sU0g~>cm&}B8Mv6G)i>ws3 zumdpR1d?zRfMS@uPZQkzZ-CaO#nlU_M<(nAJS=KfwSCf4YpbvpQU)(DD6usvj#1&; zNihp|?Xy9xn?ghDTfqaHHiO<>6ytbo7TaMYoMj~jk{+E4{4Lp79<>ZihqsbLA}f_z zkjZM{i>>e5|b@nI$q(<-KTO2q<=V6Tpfu7dDZ%9j5TvN zN^~T_^DoI(@nj`RY9o%U;|`XUdy;M2ZC3o=F)JxhR-T~>6D({Zt2w$`cKs z;?8KtOu!3AD8dcD1?z+fHJz#hLQhzVjh%J_5HwTd18mMxT%=Gsj(^34kF+6i?3u!^Fsi;(%J&t2{=P&31y z8*O6#j*CD83Z#% z9Tm5K=8c4y#=8o&6wv6lSfaxh9XI%8q(h+sAZ%;Bjz|qm2FhrLzs}F54Y1A&GDg4m z1~FB9?x8*ygrYCBHt-IF{%36cz0F~fQ1<}{^Uh!98ZZxi?9pSFe48GupN!z=tSSo~ zYi#*k5`&SEcwNnU#s+L^BAzy1Ef z!!O@I63Q)F_v_9QUx9IIi!-$uHG#dlm+>cR-`|e~z?l4hLT2>A3%?DEx2z{_K7s}? zAUV?}bRdbsXBrgok?!AYAkMZ7-fb~ryyFnyF=< zLPWN2LvqK~6ouO9LR)GT!Xqwrmg&VyIvkK*(3APjOYNT7eJaaeqC;iSmlE3(puHm2 z@8r47$WPujlX_{WbGhqU<*Op2Ld3$um{Si&4#{UWf%G1?9$2?&<9ic=@y45WFGEr! 
z*7#M7?6;)1eeU0oCNH^!yPtw?s)8G^$3-_^1}hWbwGIuE9nU*HTGS-(@wS^f8YoNX z)*oda0Yjl|@m0@*jk+#|qGjWbO;nF~hGkbaj=kt+!Wc9?1Mjio;#i=cX-T@aw%{9Q(QbIaF7sk!``Mmn; z@$L1*c-jF@p;@EAb#BjH`B!DLP$saIeVKhY4zlFfw0hkii(x89DU9gm%xUzN4|{Km zy-2t`$@D{Wo-|*`MWOE2vv^nIWJknV(4y2ATIS3zD{iDAlBB?qOH<3M6v8z=jp_w=vT z*GC!*zypuKkGrsGI(Wg?m*7z!>GnX7e4wibfgngNR2j*{->za9rby?m{l3D4?mXqv zQ?fEHrf1b|J(6i&qNI~5fO+u=O7X7cp`YVD@+A)_Kej_Z22Cr?M%;@ zSb#pht1d32EQZ>M_kKC?$dJIynfeg>c2Ms)3fN(wCrVIN{Rq+!;n*$$mKo7*t_8!G zwK_=*#xP2H>$(72S$-(Jx}Vq631T6UkoE!FgC)xn-Z`1xqe`Wl^9q~ui<-nkPg=7` zQ(_a_dtQK`h4H|Gdo;6`Haol{*xf_x@*94xn55uN1!zTm-<6)_G}IDD@aim97)$R+ zSNgaP~u^f_=x|RYRbpt<#i&Q zqt@n#|CU*~H`qQ5;qam%*M$|F5-NstdH9j-;*LZlFLJgz?V2a4?V|)?!S^O|@Fadl z<6buFZv!A5e4QOs9yDzvWY2)$dk(+vzxpZ0k{G~$Y1)T*;Q0APd>`8d>TI)f$d6-P zf_H1Mws8SKYH5p@_|5X_Nc8BbQw5|m>|jWI*<18&6}ckC<(wvm|CV^WZ{@1O4qVGh>8MCg@rCHV+%kkpA9Fer4jpZ$@CZToPgr%JJXydii zhlHyXIoomf*hXuQYI7%XU2f6i=)f^LV^8QD$VVrR^7k}~>!g4s{b4S~rf<3QGFnFs z&z_psjpg@;`e@*(@<8zj3{gV#Y$Y)EUoA-uwUpL4ST?ZSlS=-Pe(76@YfJ2}qF>s6bqlQgklcO;=a>-z%$E)+k% zLNU$zE)3?%X<60{Q_#jv@Kd`(mk`xOs#B7&OBv zJ4nZ!Qz}g*%CksK1V^8`%s(4QNS2=w@XqT6%7n7s>@CTvi{q`fUtVV1>RDTtDY*vX zAhf#g`^i;nd@%ZLxJid19p8CM?MQ?G$1^_;55Z|7n%}0lx8hdk{Ubs#PoE^?2`2Qg zgWK;wK|px<)1D=5r^q3Aw!U^6goj`@fioiil%K%tbB8B0TeGNc^FKqPr@ze3e@zSj zQHgK5uoN_yABUV>(iClHwfjxFtw8cBkMP~krckV0Yp2?E=X8%7p;12du-!Gy9Wo9t z0stRb_a)M>Lqt&1tX(3;7kSHsxK*ku7enNu6YCbsa|@0!9(hfCH<>2V6s?EO2#sqGd0Q-*jy#%=Zs~Ji z%3^S&q;k=X>5^SMWO)d0v~IFLO<)eYd7tUo=)ia=psZM)Tz`&`QHNnLi@zvpd4(C| zImN2POb30_exFOz+A}ig)={$$fmn9#=s!Qdzo*o(Fv*i1nCneYJdk&tM@M#l@a$;y zo0al8`spE6r=?lYVEVEptjmaKmRx(rFGE-5U>yi81^IdgYSDB#FF+APpNVz9(U0}B;alRRZRluG?1o7*no zitwG;3+eeD0og1-Nlm`${>}m}P5X+Y{OkEC0i%8ZG#+RX}aksvsyTrK{NWU)&W# zRvK0bPy-8t>uj>gY-Xg!yj^{d=0yfCgI(J0oxu6ls#7+}S2;SX#|i3eJg%3c8PH)| zL378kZI+?_vH;r7r@4hCi$1&x+aB_=d~(W%_&T8zkdr!_$q z&>dE%!^}eC0lTg-4Z~_HpCwX(Os?Jj%c&mxlG0Wu^@3xkq9v(beu$6BO=GgP5dF&z zjAd$Ao*2}c?+=b7Q4D$~*lZmv5fA%s-VEJJ$W>HjP0}_Ys4Da`ls2Jreepo2Pcc2~?sHe{b2yw-CB2zQqkWgg0rkw9p+~Q+)w(`d@Xq80`>ave$g(eax^m65&%`#3}gLBT_ zB7^1aI4U#i39rSqiR*Hh*^g3v<7n)_<0x$Qv*+PxI&Pqkn)y`VfBl8#ys`(|WkycW z6bB#O*^v^P|1U}@7T)~}GcVWIhtI3d+PpdKO8)%P-1)E=QiZ38()cDdq$rxH5XgNK z^hNI&>X#MWDM(hlbatnj(k~mhpdvPm0CtfNKMu;im``1X@Sdu5THlpe3K30Z&|X37 zb}3ue;kf@!{zGJw5%UM7v+?n(+Y;jpJ@#gDcFQD$tkIAU7A3b$3!~RSOq!Od@A5-{ zD(CNBV;$JZ9}sSh=hjAV4Kh;0^me|wCGXtSoOrY{uo%H*330{#{5X^qS?|?(>{HL* zmybSL%(PRDU>W$s#QeQ${O^GVe=*Pm_WlpWBcN?V4%)S4lyXam(K*kpzK@%wRJXtF zvL^WqZ-QOhn3rVKsK&b-xNN9i;{h4i_avVsqe4;j5iH^lFEz$=kb%oSjtmiBN!WE#hWT^UBME z9`4mr-igV^GVEGN!+G1Wi~4lCruqg7Y@9&;%c90;kw;-m!J!GVFZq_~is}h!-B?YB zIPyUha?fRRH=R!*XDzpqRSQ&a*%``>H&n;+>R~FzSMpSuQ$D?xPiNz}0(M!{Msd4D zM8~E_)0Lht+xgm=kx&Qo+q2|jyS{to+-EZZcQQLG0}uj6S?D3BN_R`+OaP-BVQqcT=hum(|V48jPFWXMmdLq1M#QBOf-wa8XN}6syO;%^G2~*gc*7{R|H#aI0DbqFtJ;8n`Gky+2D?>o8no79->7{Ybt* z!WV5f%+&jB;Q!7ct7kxY4(tU$+2!vfkjSOnxJ6g(&+uRLb>d^E-n`Mv9pypSyr3%m zZzSdtKw@A;c>a0<{Qsi8ZWJ$^ecD9EvZL_p>P@bDZR|LPiv`^8az{4Q!V344Tc4o` z*xzYlId@54!H`3EjI9y(I})aWKJy;0ax09@P2oL%K6?Ly2Ge%7^ZYGbD)tQ8E*T^q zd`Wt>0#kO9KI_uHveeMfla{$uB@K9At|M7S$6kQ$&A!P|z6wjmx|93Y%g zt^D9B$=m54$p(qN7-8bjcsZ3$FoD+tC>Ve9L2t5 zTF|^YMhNyg5kK(YNF_B|aLj^j?mmlEeM==`79b<8BtOKS%_Qc69EF&@Pw@`@ONxRk*7j!E7WN^|9d^!HsbVstV0T6-S4AH{g$c zG{HtM|Fp-z3LWe)iFd?ayUaHwl^=CjjYcN3lw2ik$_gDP_9{vN9)@j;5;PntMeEza zS~GjN?__{YmboPG{sj#W(Zt)3D##1wY0UZ2hnBVp0^6n?U)QwFU3_|HNzV5!DWzEMNPr&qiEIa}M0!%8s1ziXS28OvFcG zRyZNcB3^&cx#O1_)sBPPQiKflXjH_x@t$U}(@?Xgu5rOXTq1xbd5%(#BZJ=Ws&r%- zGy_j&5qYGJ1A`2*0>|~pPLd=C?n4@K6Pr^G_r6H5@(=vPgu&`2tUTW7Cz0G`UU&p@ zH_E$#Tmv)wQ(sg!7|2yhmo$;`AavKsO;mzp^$W)B$s;8?-~JzblAnrZTf@HlP>tj2 
zu_O@KX%tBDQ+I)8E(M_$hhIVk@^fsA+5joi%qyvbqUqPi+Kjps(Dk7)MS;o*Cz4f- zZ^vTY32d8u_flu)9AFtr-m7`-h5~X9sXh&bmV(-tE{)H%bgt#aYp{zWRB*=je|>|y zj5Gy+!FMY#_#OZT-+!ZHZm>UXmhkw*A(2_yv|1UZ<?`-jMr73 z98%!Y^pdN`?R5@)Qk@n%*(k&~DTw{g(mG-datbb^fsiEagd8I;*0U}lLu6GsNPl4W zP2vR2jB|y2p!Li~oo?CrSp$S`0NC6hUN;->f6fSoq1Mj5MH6CVCbXh?bZw+-YCf)* z$knj^tjc9YLJ)(H7mUq3XBH=htM2LB*G-j_&5>gJR;)iHW~uEO1w%`ho?Ptbg34@b zul}F5a(p$|i;z-nG?sNBDW~d?qkXJiGuh)yv1}Tu5g_Lkx!(?#vsxSimc^V)o1v2X0;T%+P> zWPJ6Q<{Nl#rF2xj<$Ds&5P#VzYh?(7H>0gJTEdz)CVvw0`Rc#0c5kq5fM9)c)p^f- zzjgZM9{Bg0n)K}yVV@TTk}b%WcV_RgY(bRKp=S&_mpw4>ld1PkIwhy9zoMmKfQ*xm zA;c{Lra@7}NW@p}I98Xry&>FwSr|f_mw0^IifYDk)+c)*3yjhuU#+KQI0t(1cWNj) zV42<&pf#d_CC4Ik5$!2a5VD&m3=4!7ZLmyDzH@b8SXh(uW1)mHH9y_O=Ja z10;<$RFKi6lP-+;qH$rU&Fz8P{Q;06V^)9~=*ayU(!~8g{M=Z#M4aL3!|L`hdiz)X zx>vT&RsT1W>n$UPW%dz0$dY&Pk6NBTmXN^A;=gkKVxz}^7BEx_Jd0f=J}gJns);`T zdXwBVQm_xhi%kIi@Klw8M-U1G-ri8OD zQp5!Q?tcDNoe2n)rVIl~UJCI4SAzFpxKsTfgD$)7&z=*1oYJ?SGce1<_&Dh9<%er7 zOg8H~Uz(ATiU{tkm1=Rcr6Vyj`wx^EmU86rsgGispTG%=o~PRj5y!?Vbp$WxnJ&7j zQgw4}yr&NR)OFJ;D%=Epf~JS3%|QUITI#sAd0qnjxeQYnb0;Twet%IPXx_rQ@g5 z*bavvJ!`n~r_P~eQlz_)Om=c7HLQs^QJ=Lv+1@Jos$JN+8GAv2KBJjzJ+V3Jr_R7( zpmJtq4-Ce`yauz3U5q5J*(d*B*6ZMu0gMfxfkxYjoKX zPE7D6r<5GSgf8SW7;~)siF(zwRLc9H`>qfiQ-iMO%miqwOBWB~X5mbgTTSl-28iO^ zfZSjIG70e`$(e4Z%Pgo>x+vPUN;z2`TpRiL;VeFoZ30;#7L{zB6z#FVq9CcCb_f5T zQ5?^(fIv)F3#a@@6=iym&Ygbt8?_!5!=KYddoKH9b`pCKlSWrTj@t?YdnAL7g>(S* z*TxKSB9ug$+uo+}!v*R>4R1zN2GbVn`4fNDlXfRyG>KFW_COy>?Ja;c6L z@bIj>4Zqy>OM$GsdIPa0E-T$D3%p(RNiF5EaP>CmJ(xK7Rg3DLWqbu4XQa6+n(rG* zI^F>PIoU@h4h-rc0g&e(V5=S1G<=j{+(CNvE$eje$v3LeS^ElkfTx4DN+hYMd9~_+ zgutf1(@ph0r^3uaZ|=3_G?B@Ap#1@+Roc%O;s{vQ6|(Rik!heM`*%w$?Z{q zXh^<7es8MVnRkV7CIf5Xg@Lzm)*Rgh+u#sBvL%o=`esEc#m*lSEi9?_1u-e|fYdPn z%3Tz`nA!|!@vBZs!47=6`wm1m;EW0RBJnFIfgex>8h9&$x+i1u>;!8D1c+1aH@+;Y zj~cnN-9~Av_;LdS`x&o?dkICS!a^CLfxbyp(5m8`-FYvKQ1#dR&1$!Kh_fK9k=X4I zw-K0o!xJ5MHu!;)+lcppCZNJ8S|s*U-2~OF4v0s258X5=oQg9xZl&9aVIrOylsS6) z)<8%)oU0-KAfKXZ!V_XsxE z?eMkyQ9pevPCd_%Gc|QiipEI$ZXwWh$`-dXlr1qi^KB_e5+Y>+7Wrm<;g8=-m=<2b zRfU^lLK|%#eT?dtvLrb987r2MDmgxRBbQFe+-p`o4yhB&ZEyIQ>8v;^P8S&C`W&|* zP5W+i$pE4c?zthzIK6W`6Jj}8<8w%EtePPkp=vp-of~}W&2?DEwEJlDHj4-e zR+g74@j2b0CrrX$>_8n>%IDB@Z%vPy!J2BdQTcFBBxUa|J*Ef~z2i5=K5Ii>s(PGaje-CK2;5lIlMH&-j|k9@%nyY<>~7e5+V0Yk zR(<7ikCx>?2}W92WKEjlY@!_-7iY2EQtGt7bC!6r1B2AE1Ylv^ZgT8gK}j<-*Dq_D z*(xjSvIT7z+6P%7?q(;LbD0b)T0yJ$8`LadMY9AmVBc(NNzvA+E_a&0+&V6CjW)$i zD^MuTZB?G}B;Ho`($Yzhig4=*&`TbRZ;ES_p#mJvxel-V4U3kSUfC&yy+)ZLwk(A^ zT3Igq7bpIRMgp*yC|mV@@}{x5@vFyW)D?9;;^wz>fz=z9S#_iiM)hMO%rn?HOg}e2 zZ|_|7M{+#dGP_g-FweNmGAK_m=!HM#3$jepu^$=xA_}TTv@jM(;?RGO70Tf3a*DWP zv}_P)mw6PIVrBEY7kq4n8qdhYVnTW*&-AVMejdT14l7&75fRE5{S(8Q zjxCUo;8E$vVo8igYx5m{{ft4{IMskJ`RBe#-Nbx3!!7L((l&@alZxBCGv$}?D*=Qt zv-C^*K7(?#rj9WPv$8l5aVBhHY;><1u8)rxUy>8cTi3sdc2 z$j5CQe%)(O`eT4>h#2g5%f0%u{bJOm-zrYk_XejyPAJ_Nk<_PZ1l2PzPjdO1prcvb zw_jeb1f>V`+H7=f>+~?xKG()AoFm!uEn)b}v39lsrv0F-Ha3AQ4^GD=2^P_;J2nWA z)!W9z<7FAwT|<&RMOFL<+vi(XI98>;I8!_k_|gqwHj0lSvUDlJ`17TjpAl3RdS@7G za~FBS*ho0#RnI6SR~3d)WffE_7j%-I-Oh|aeAOX)0?Yzl%pf{OHaZxG5U}Rpdk1Mm zdn4N7sLT(R>rF#Ore-_l5<#b!e7^6_r}F~z4M*IT3`(|VF*wTbVWJ~RQ_4}%$y?Vr zc^04~2llKCw)IR+PEk24Lvc7O2~TYt2FF`YA8ogl*! 
zpjKcP)yi{y7o2wVoR0aLTYVl^>YP&>!ly|TU7hBkO08PkBqX7r=e4S3-0$8Z)am6# zDT1bwQej45>PVBUiR9162bYq9n`l9p$grj4-_Hdu*}!k6JLYHURfPee+xv=1CQ$P?Yit=aNZBbe2s2Q7IX`4u#(vo%cp@)fjaLs@ zFWnoR%T1c*#^B6;$0>-TVBcY@d^_KGyp&2u3Er#&|A@JO$Z6ye#KjD-|NNO2!L14+ z+g#=3{EX3vOX6X0pRMGo&5~vr1;h#pSu{O#!Qz$B0{jYu z`YevqA$^%#cjzVk`HEeGrr`)_MkwEA5Rkdu+u)8l=gvHP!U@<9-#q-kT|(Bm(| z?nV~DHAoR|9!|&J@Gmf2>`>l@fmsH($w49m-w1c`xX5g0xZ!ODkyA&Cw8c8NVCg zZLRT|_Hz=en(+yiE=L+*55W0Uo;}Hw8w+187?s9Hk?cW9_em-$v>&#(tstqw7Su_9L4cYWJaZUptigys z(fW^#hp9cE<#th$&(aRTh5eb!l%_?t*_Avy%b23t!cd*eV6MP1Yb0Pk-NUGBB zT`Z=OhX|gT6sh5qA2KhAtNN%5$7duc?>`>m&_ppoVmB`i)vlADk2=4Zg%b^@7HZo(B z$PjVm@|`e#PT5m`=@6OR>pTTa>vE-c}h)qf;p|s2hA2=&YW@xA2cnso=4m#rX z?RYQvpa|%`5-;T+Mw8XlqGZnH*-Q{GBlJ4z{e;nJ-V$3%WO`SXp7%FW&e3u%s@cgOpX#F8KF4v$ zd75VoJ(oVEXB{D%GTL02?pL$V%HkfKjb_%b_*|a;Y4nw4iJ)x&t;X0+rc%O?S|boGQI`}I4^|x0WGk_h^!c4Kjj$rZj2uE4s*5}{Y5 zo%B5P32BP1n5I zo;IOn1!1Z=bEIy3=;v~598a_15@x+9@rl`>8~t4e{?5+qbL-j#&FLQ1;f$Xy*Xf%e z+3xuZ-Dp=sr0>DYvG6tFRRlGX;k2@A3mAV)(Av=A$_U)>h+eHKERlWyN1!aRrG~Ld zD07~AM=h~SRUp2?!H|i*I?8JvcTE9rEu5dK^wKWn$ zHXo`qP*+oR0v=;^eEH~%YFLH}ooAsu9TeufY$iub(dyIeTzw}SSU{*|G&$23^Lo{A z+yYI`V-kf~FayeRmu`fIBb;i&CpYvE;++{^Jjk2;WXfi8*#R(%2vww~cH-tRd z1kKW_#NTQ5(!eWg`sz%ns4Cl9U=ECWFpw)pd$e74fnc(SGp#ABuOTr!&@INzhh1!A zW}J?V#hje(=B*5Kj&Qz|fkkjd1QaIm`EQW3+rp9lEVJ;(WvmlEFU^RVmd4K4M77Bg z`gO8V?%POchjA2Hy|o!$Z=qb<9oYgeFX=3Uva$4GqS}bl`i8}&dB-TT8fUf;r(hf{ z^F!vIubG|qD}}U+{#nzDqak765d}4&)YFym>VWn0?s~ITTMR;DarH8te);!Ca~Xk# z;v-^(N_~zmM2a7(bGJFmQo$sgcc=Y?H%(AQIc{J=CG+6=YEmq zvXBpnxEkh2m(7nd+i<*IRn6&Onkt*cDPCc67R!#WI`-UFTEmiO5-A5_b8GJMo;-N? zGHG`!{TuX)L?a`rW!x4zt)f>*sHCuynn`k00c3DLAo*G5rZ^tHc8*3XN+O51-z!gh&Y3>P?Cd$BhlVdo?7O(daIcSXw zQleJ&r(n-LO%K(Ef-9%wPW5*xM;Z+M17st0*F>~c7DWt#0TU%Nev`46L6fKG00;leFCTQY%!!C zacJ0~WnrTR$#mmy<{gSo16x|0iPuz;1^rAHPe5Y)m^vVEPH4I>$s zoNRcb9N-BDYg7=WUjC#*m^93NQa*j>>g=?`f(abe*}zfV0KAH5WR04_Y(I>uH|%H<)NsOds2S0`h& z=sY$S_?c;dpP4la^IwI%vGM4h5;rIAh*FdblU74Z+;pRf`XSbJ{8)T%cm=M6Q(*~G z*TcFE{c+@fG4@VDq6IoqauGY- zXsT%tprR3hi&)1S=a+MY%<1ScX1B-T5OmqlnNIOXt#M*_+M87TNlxH{C=c1tPht(I z)tZ{w4EtE-MzKLh9|3kGGh9ic&DaBErfFv zfn|NN2?;{6xx*_~#%r=7R1Q~&NNGJP2_rqS!Sd|f9#>H99Sq7AD_=WjhMXXL$~tQ|+s?gUN5v$@bhmQUK+TMgp@3D6y~LP9BKB98gs|KAf-oCm9+bd+H*tCTsI zdFn?__uW3iaY~hFyp${{)dsCg5&VKIf4Mp8!2GpADSSlqA59~R;?#MniR7>;1!#<6 zX5~Jctsc?=VTHoNyF#&#ObKE)4vR|b4DH3GO#X5CuWVlFPNjc9ZVkB9aFIp+Ymo){ zlv-k)je^ueP!>z%T>O#+hyUakii|j8SG01-^tM$GJ}thLbSc?hE{A(=WlFLR35?Mk z#T%DomHfT4z|65q*V#9&ws3JbJ$Q%G)s7H0YW5lV{}arKBcD+QYK}*77e1z;jEg}d ziQ-A^wpmp))0gH1kW4Dj_4pGsKxrwVw~ zzPVAy#VOc7KjTHxd*$Wvyz|FSZ9`nX-ZB4lvY}+A+e19YKwbp8H+4r(OuVEduH6-E z{K6S01~&?U*^d^Bi+Ab}StnL$ltiZOFvp8Hw028G9!#^q;q^#2!UW7*v-B)|^JV-O zV~AjNl%6y!Zi#Y_o}+u^>O44T)V~$8nb?TvkT7%wzYEEwp|Crdk40WUw+6PUc1(M0 zf>&VAytqcgm%`j@51tilef{GmD_0$V0!+@>1Jd5Kv9i-L(0sC;>(kI0L~@~ddc8F7 zM0j58AEX)!CGT4r-NMw!FQbCZ#aoBd*}MF^F|Tgh$IE$HeX!tlNEbMtq)92Q&+LW> zDlLfTZ4)!Fr7w$_<7S&=8yTD9{%=^sP`c7Em^XDUdbaA>%j;lJ22nq=J1DfzMwhb2 zzuH5-MT{|^?6c7?Kf_sG1o9qASIU#P+%l8v+_RAzwC8##DSn<>FqVlYm73*%Ow-o2 zTV(|APFi|e#({$6_3;^=)++OS$wSUBXwVtyFL`fCtbtXCn&R|dwlG}Fxp&uU^+qKp zW;ZOU+A)TQQPc)1%bJ2KZY#r6x*|lfCuzkO(bwWNR^D#R^DS@b>Blf5Dw~_pE-iy& z6Q+Sp6CP2^m%dJ~;~PMtgF^&xNh9qrWqLJ)j6_s~`6l2|q(C5|1<55M*W1(u!XyjY zosca(pz0Qq#snq|aBLND|Af)N%dw3+vLYt#td+Tfr^ROYMY=G~{tpu`(N!goa*0^+ z6!B8mjAGeGSp5U$f#BtQfG<~G(F;`J&=lu~(q=NG(B*P7DvEJfZ{n~;PpL@K>ZRBX z>1B>9h?5l~Uh5A=ua*;|ztuRdI#o-fR2ztR)Zf?Nubn810ZBras@;;Cs?Ua;@-Bro zs}s!VIBy3sQ5X#u5(j0DGT7AQV)&$nA`;sd3JzC?2y9>WHRpM1BPlXTWTmQ>82?rt zl#ZWOCsk|PFT7?oX$tXZi9OE;zr&_$-asS9xNHg4Qp 
z#l)R#4h{D=kPhqHRX0)ZlId!E5{h}1c0@@=mp;I3GlbLa;IYd0kB+`W%bFG&8xm853`&fc7P z#&BhN|BK=j#^5YAV3bc2;So1Mf6rXM-qMES;WWe5p*?9PI?#C`RAZWMT5c+I9ko(0 zRxKS;{J5!&`sDu$Sdkt{q{~)P=92lLEZm;-9L2NWhI)0`$n?oKUT#Uul=3v;c8TR( zjD6{F=6KGnH2gfWig!;7YV%P_GfXo|!@>`x?$^5tynq<*52~viHF)`rl-s%F8dH{a zrt`w~PND5OOJfn83fc_=I1K?lazR$J<1kRj0%vPZJL6or-pA%%3FA9XO)>?ySh+{f z*1ULi9^5D}A^~~Gl_n;0wO4SKEd%j0fo;nAB0Mo^&8u=)C`2H8f-&H$IL{UmL>J>F zFl2jjX^2;!5FbUqR21jd6<>lVRX%REbRQer%{HJI7FZ4}&pW81C?Vy=plDiZwX9Xr zz$G-Vv07nfV&X;E-!G)e1~$r#%x_HF)LRp4jrew$5eeBFdtFD29STl43cst`<)LCd zTLDC$0OD0YIR6^N_0Rzg=9=zhzfwVHl`^6_ZNOEeGFJ3y8(a3$6G|0s*HSOXxmn_TgzfprbRq}D0 z86)xycr!&nqe!Pw_r$=?3DcpzG?a6dE*;a)OBb;d%t0>AlkxP8(WPHgWASTh(0@%0 z-G7*xt((i3xH#;Ca+LkP&AYrqOiEpY*_50?r?)IulX&vCPD`tawX+%;yN zE8omfJ3z_!GG3V{h-uTPy79%e>0%7NG2?{Ko6UmAO(`W zW#-+qBp+2-+27HgHu{8$;#03D-(> z^n!zFkwPyRC2Lhxplk3R=!Qj@pV1UueejslrecCJeYmRfWVUZ%E|1)0L7LEz=CJ9S zQU?A{W|k^pB|(Gn-LJzj#R#HWZz;VSGw(YzHDP@nqdMnJg@km|;=L&rs!U?BR+irs ze_HVItil1UU5-`)!PWh*OJ;Pr&o7d&F5Q?gSTj!8puzWtvvK9-DjI&*uTRju!ci_G zF%L_)TK@+WLvY--@rqI26su`TLlc=|Q$lMUj@OdfPGA%n6l1_DUTo&PlphLg2mF4E zSyGs~Rw~A+GLWSswbm!vRmGG0J?5$%P+J13AO#rN4wz2&gT2`HeImfkQf%#(L{Qwf z;lZJ40o<8IW?ucObohHZYt@os(P>V^^vIMW@EQ`|v^=P51oLH!S#XhaI?RHt@z*c^ z<1@xsJ4x8AGi(&>I=s7ykAD-}FX{LywjIifyAP)l29wX4kZAD%o+$+_Yw$BaUa9Gq zZpBL^p_(00!A{a-Ms!y2Q{V~CZghqMu_qw$$2jq@v)86XlkvlfeiZ*u0|{4q(Zl9M z(nXosXKf6qOsx2C3zsWgKPqT&!-hvX({zo^cB9kO#Lfl}7l5M0*s!-|%40Di`et(j zQJW4wF_b^4)^y|E0+!}IZuLIK@sf4v(KLQdqxlD~sW;Ma%^b;lPxd0k^g?f^3a>V8 z6-s|39Kfi6ivE-CIGq@?@S9l6mE>Vrys^ZEF=lbDk2VqeW^=KsY`SU9*%x7H1f6|r zSoTNEvOmeoh9;2&pc3`gBjFR)eY-}~;pe!k_8u6G{k0}zWGe2l741iSySt}tJ-ji# z?mVZM@0{%KuY=?F+zU57n@0vm7%G_KMP>@P^!S^%ClFnX4|2L&7)T%&jJ-8>onhMa zTzb?r^A~mhFTZ?a+1Jj_`xTBvh}|46eADn+F4k6{lvF=!M|c#UUm#Rpgg88txt-D- zb1BZSfg^Z6d;BWW1x(3!PTS!NIM!s91%bhe*bH$pX3zFb$H_{6Zz0MCz29;v5PP%z zxDkw(39@LswnJGbozUCRDQe+Hc2S-X;aP;y;c5wIx|P$6&~ka?)1zen4(q(7`$B2m zl@Q*7`TVa0Q_>@ef}O!4_IR|5C9BfIU!OdQ&Grm8>ZvAKRrkgk*D_P|u3jr%Jj7V# z;`|jQLZGu+V;O>NexP(~uIgd(wIK6f@U(sus1_~_qnc#I1t=eZ6AH~o-aa{n9>PRa z7E3bQ@sx}dy`h@KV1*ieerqRE>6~cukHYp}ZZTE33iQnsv~fw9YjYJeQZ7Mz>LNwD z^*4;_$NK)`et^GncDP`b(mY8S1ahRl84_@kyvvz6#bgHk=yXh61HU0s_)Xub|3DNh zs7IUVL@gU5O@mGUQ?+YYS%DyBC8j8h4^2eJUZ9jg2ebb|?j_I}DkwkPews}qTmA}}oT}zj@!zJDL$PpL zJy2}IvU8r(LR^_sG?y`^1JYrpj?=Yr)DB~ot5+ClItMST!RxN8K1Xst>@ACYpQW98 zv#oAF2fg2C&L5wX_4N4feq6VJvM;@#_k35qM{!?#zArCdovVBod>?u?lbyTxS>O0a zKR&aQk>%;TzVsM%Hr_XKc8BLbx7~|;S2+{res6re>}ThEj-EK*TJZP8_}?fIk9?o^ zrF?nc`@LiMUuUy=eg$Yqe_h`eUEd$l^uB$zz6Z}{{oaoFel$2Y)@nSF;(ik0zW4oh zdA2`C!yXPU9?tl5eBa35c6lkl?xB~CJYWvqYmEq2LwzU?^86Su@>GOA(P7CxFTjxj zcrb(7zb`PRnho%$UQh60$nwR1purD*o`U9=Vj0sfdSm;wv7h^+{l5G4{ka`}2Kw9l zzVw*|JYbH0$7J{&li?um2G%r@#ZCe8FM=yRJW=Hkd4hXl;j7J9{*RA%{?C@*&wCu- z=aZm~rRDx~T<42lccKV~@6UU!P@vTy-QX`-Nrk)cZ(XRL9u!!JS;FBby)cy3P@TxT zS-#{#%COCwVu%|lLP>AtPOqQRc7a;j10>v^Zy^a~4Yyu&QaWLeA5?}y>6<_w@^5vN zGzMuua!Ji_rf!Ni_sZ|b*yJ5v(@8Z_0O=%-E0WpU-|B%aA-;9K_nxP) z%U1&zsLMZ$+X@Fat5Or*hjAtGa$&wSzR%uM?=Gy8ADu7Fd5`Zu^o%(CdQWb0qmK@> z;?T?Ou*fxmUiA@xC`c)h-Zy?EY6RTq2kaOJjdpuK z{BCm5cImJ^zUo;uN+6(d_Dug`-#!)tpzHROn zWiC@Poecgctd(v$Cf%Sh5!O0=b){RaKhY(}LxX;o0*(DkB`7H0>}j<{+cg;@K7mWbb z_jHhI9{+iudl9r=;h=&8>fb<!Z?*mt)VS2`{ZKPPixw3vqdf>n4TigGl7{Bnil=WZJh7N#sz-w zC;_|DUmONcJSkT&M9n+x-LzgFE3lkEbmIPW2V|2vaCc*bv%3auVqb|UsmEGnd^{Xt z8Od_T#FzKy!Lr0XzYyF|_0XDs%2wH~jbX^Q=i|xNC2q{rwoYd})x2$eNZ-N{*@T^} z*2%~Bt!F2vaD0sCOyb?#&EU-O`^0hPT0AHixbeC zUQl{*mR0aTqBdwD@Td%$FW+<}G3bH%w9J{qdAaDz_@sud{KS>e_q(%)`c=YfIpBCn zuti~h-xq%_M>fevyxJs;l~efgxJx`m8n_%Hzg5il(~Ym5j$lX`F^pPr-JSsaZ&|K( 
z=?nKgTU96Av03W9dM;Xa|F3eJ8uomh+Au?wW&d2;*u^DFGyR%Fy$Lu8Tb!tpC4Icp z6`=0YymAt=iYA7hug#Z-E6X6zwvwduFAnM+f^w1=q~^>K>Q+s&e?)s@92B1GL?%Ub zg@AyLIhV`n2ApK&bX>LsBAl?a);T37?~O0BQKh06N5&!TxB9WAL(HhTnRBiM`1H`_ z*hdVsEE*a76=?dLa46pE?jH&7zKtCoS?P-&$law>vA<=7D#udhupqTp&w8|SD!}9k zBMjVIxq#6)6VOY{v6Wgn+>g*r%9Ufr>M1HKj5;H++9to=To}#q#>q0}{hySX`5cFf z(nfj0?%4H&pR{tQ6$1&{(>8}zlhj_dG63doV`)9zba|6xpwYEMt?my2=}-C-d@df^ zq|pXLOGCqG!P*76M&O%d1pXRrZR^d-%~r}l#fNIQ)dN)Liu(a^u@$qMYLn=7t09Dt zGSw7EIVPqYs|$T=g>pW5d#G2lp8wr~X^=?*T1waw$%b z+`PM54sI<591`poO)(1-P9#c$M()Xr-#p@x)cH=;KT#4DYZo(6PWbN2+`gG#R+@w{ zGm)N)CJ$M(!DdH35cKrb*$zJR@ofJb#sgYKI)*b7Vwd~`9UbHtR^Id6XD)?2;Pl@Cg!YDV zjY_fr(cukHik}8)=oo z0WVUiKXgoGlGl-=CSSbn$Tg9Xx>tg?i@_2H3ICrh&rn?Obg>=L7ektl$5+qxU zLUs6$@n*IXIkSD~a6Az)URMS)|!h`kmwpoxxAd}T|ubY^%V)4jnm#gj@TGIRmr)4^04-W{1G9| zPv>syn;klMce_7Zy2h7b(rk!cfEWx?hI&X^AfA$hXurHQD!7WL!!<#OO$%j{mXgCQ z;W&?;ylFzzdIHW1#EW44Gx%QA$l z+}l0cwS4fY@TsKPs#_H|*3%UlH{IcUw{(2B=x55d1fzEp(24IO>)#>0fNt0JW!|n# zODYB01N^Gh*i`M=<^ARyUrIji-<^FwThhBgbSlhO?YE+CB^W(^F=F{uUHn=4N^}Y6 z32%d}LZfltl+pwkP{pU3-Jgqtqt@jjf_pG>Htgk?(~~=7gbRDgV1rh=uH?eR33Xy# z#4Y4o0$`qdhFSJIUcxnMNCo_K6BSLho>8 zVw)@bSQ@k~3u%(@mOv#G24TkXdh1o>kr^5I0xv?MCGiSi=QlgCByGIF#nBrUmS%y<2Vyhm$Je>Qh`Gwq$X(^b}599Bajp17g}>E*lJ&(gl?W3y;erb}<#C2#M(Jz&C8@W7*_~ zDeeFz<9;HkAAY>`czwG1`RZCf3pSghNr*Vfr!q2032q;Tdl^O5;GrR5-rbG)r*JVC zqEFTYtQfxBRV|^T&KE#oZwPZr9W@`lQng*&q|%lyne|Cvef4K>+IDOK(g zbo~q=OxtbpA7LSs0-gM2VdSyMh;tw$IXuhD-P8WVc_|ONV~DUBbhDmS!$mToFUVw1 zfiO4k7EjHw4&>?Gv*BN7amXR`<~wm6{d-=%of=kYuouuhUgtjKC<^G*VQc#X zg;!tQ@Z7eQKM5^WXpCMvk5jjx87u45*jJ#w92JTyiFp9Z`g2ANwrZrK)0j z9;=G%31Wzv#*OKufTm@+uIk#N*YfErKz55={HZTE>Y8K@hwFwN4YN*JUG^ezB9RMM zcesBbd9u`i@;3F@JYS=eOIx+8q&6k~$+`@$dMy$mEFm~^B4j8la2C4+ zW8VGu*m7xi*c>L3K7@GEN2tX$7D^$Pyu=*6By^MXaNp&vnOv$#=L`-Uo4%CaRYubv z=R<7yGEt*4<;X#;@rEE9qjTzDJ^Am3Xe+O`rpc7CSvZDrXxNY~4d}*7qUUn*8Ml-%Ff$%I{^C4PL4Ud{j*|ZVLi6AO}LD z!&;oIZ3)BII821uo%Y7Q)gDn1;USO{3y%hpyV5{!b>RRQCN3# zEWBt*jot$scVv$=yd=$-TSCrE!(bY z6#WZIerSV%z}CrfzkO-LYxq!?x85%2#J1wC!`M?=bL3w#P+aaQynjV}+&f*_^l_C5M%5ERxrbfHvD@Tm`GGvJ zy@^wsl8C#9lXdPBrVjcO{>JciKD5O5FLlmtQ6UweiKsYdD-U_$sAe$jUnSaSx99MM zg);?GArgF>x6$9Z|&UTE@0JIs*saitS?Tt7xF%X;#cbOQVrzp;Y57sEal5#J7^hubtmn3l-TjkDE)Gv)|#z$9R)TX>ei9p8|evdCXbVDG;H7*fe-b&H!8pIp$L5qUCDhq~W9-<1zT z{$r~;5hhaAe8M*_eu&AT45Cmk{EoY(t%M%o(qi4lu{wSrEtyn@8AmszATAuRxocAh z=q+F>62f~#lnCBC-Eii}L4t>;Y5>kFJ??(h&uh03o61Mt@M4BEh#=ZGv>c>&5$r`EK-<(tA=6M3DF~(W< zezCHP?}kU}!~jrXb$^NHVoZeMuSmu}!f7u@cP1sW1xtNFBIkgZ8NmZbO%n|z`$z8= z3*UDPi){B7L{k`BZ-$T&bm0#MTTW5vf^;Wxv0J6Z89Z3nBPhrh79>+X$en)sGOBWE z*3W^n{cbp(vnm41FTT?`H-7&c z^7V&@d#v87Fem(t?8S@xESQZyH^v}Hg1i!w1hiJ6SPwY=^7dpr=VsXcCCtzHs1YH8 zC*#xDI~ziKB6YEOHjK*~(>^k@FAh%46Q}P8ApW_{idT;gdNN^7o$R#HL*8m!Ab=V= z#=ecZXCgr3lU1GyTUe-&HOUYp8wxUNVbh%;O@Xs!Ef_uzqsaU=-mn9@PlHs!LPpE-(#9R7DW+$*!1dJI%gt2|!H$9FZ|49H% z_1QrB+~=qC@5V)ubbx>A-r1`4R$0m$j|l)^X8D@4;yraZySZFF>@T zm-ge}j?okTqmHYUI1=Sag?$^BpPC3ueDg6zh@Rt!*$y4UmQlPq&?t$QfRo`5Za^W% zR+v#`$d3W9ksyvbjJc#Wqh}??GO_4+p%&AI=+PbH+RcKO3g@B^-sGq zU~rM75VLaPW}Mplrb)(1fp!#kBf)VLBn^DrmIrNd0<(**af5$g^tBAL2=7)AwZy^4 zL71P`+Za~UZZY6HP^|=DnUXW-T7MYi^HEnv1#=a>o}GDcDZgz;P}9zw);Kl)tKD;t|L@V{{ac^T0ZQrh)3z=R=6~t3CA5A>!If1i=2o99nKe0xg-iMWn ztOsW1`e~>bP!wcHxPNf(vh&<0SmV_JMxkC$eB$=UQBNmtv$*+a8aX9ekVF#2Lvhd) z91ogB^WEtQsU#BCS! 
diff --git a/examples/tutorial_work_with_onnx.py b/examples/tutorial_work_with_onnx.py
deleted file mode 100644
index 4d9de2cf8..000000000
--- a/examples/tutorial_work_with_onnx.py
+++ /dev/null
@@ -1,343 +0,0 @@
-#! /usr/bin/env python3
-# -*- coding: utf-8 -*-
-r"""
-Play with ONNX models in TensorLayer.
-
-This tutorial corresponds to the onnx-tf tutorial:
-https://github.com/onnx/tutorials/blob/7b549ae622ff8d74a5f5e0c32e109267f4c9ccae/tutorials/OnnxTensorflowExport.ipynb
-
-Introduction
-----------------
-ONNX is an open-source specification for neural models. It has the following components:
-- A definition of an extensible computation graph model.
-- Definitions of standard data types.
-- Definitions of built-in operators.
-Caffe2, PyTorch, Microsoft Cognitive Toolkit, Apache MXNet and other tools are developing ONNX support. Enabling interoperability between different frameworks and streamlining the path from research to production will increase the speed of innovation in the AI community.
-
-To run this script, you will need the following prerequisites:
-----------------------------
-- Install the ONNX and onnx-tf packages:
->>> pip install onnx
->>> pip install onnx-tf
-Note: When installing in a non-Anaconda environment, make sure to install the Protobuf compiler before running the pip installation of onnx. For example, on Ubuntu:
->>> sudo apt-get install protobuf-compiler libprotoc-dev
->>> pip install onnx
-For more details, please visit the official ONNX website: https://github.com/onnx/onnx
-
-- Testing environment configuration:
-Ubuntu:16.04.4 LTS
-Python:3.6.5
-TensorLayer:1.8.6rc2
-TensorFlow-gpu:1.8.0
-onnx:1.2.2
-onnx-tf:1.1.2
-
-Tutorial structure
-------------------
-
-1.Training
-----------
-Firstly, we initiate the training script by issuing the following command in a terminal:
->>> python tutorial_work_with_onnx.py
-Shortly after, we should obtain a trained MNIST model. The training process needs no special instrumentation. However, to successfully convert the trained model, onnx-tensorflow requires three pieces of information, all of which can be obtained after training is complete:
-
-- Graph definition:
-You need to obtain information about the graph definition in the form of GraphProto. The easiest way to achieve this is to use the following snippet of code, as shown in the example training script:
->>> with open("graph.proto", "wb") as file:
->>>     graph = tf.get_default_graph().as_graph_def(add_shapes=True)
->>>     file.write(graph.SerializeToString())
-Place this snippet right after the code that builds your network architecture.
-
-- Shape information: By default, as_graph_def does not serialize any information about the shapes of intermediate tensors, and such information is required by onnx-tensorflow. Thus we request TensorFlow to serialize the shape information by adding the keyword argument add_shapes=True, as demonstrated above.
-
-- Checkpoint: TensorFlow checkpoint files contain information about the trained weights; thus they are needed to convert the trained model to ONNX format.
-
-2.Graph Freezing
-----------------
-Secondly, we freeze the graph. Here we use the freeze_graph tool shipped with TensorFlow and execute it with the location of the GraphProto, the location of the checkpoint file, and the destination for the frozen graph:
->>> python3 -m tensorflow.python.tools.freeze_graph \
-    --input_graph=/root/graph.proto \
-    --input_checkpoint=/root/model/model.ckpt \
-    --output_graph=/root/frozen_graph.pb \
-    --output_node_names=output/bias_add \
-    --input_binary=True
-
-note:
-input_graph is the path of your proto file;
-input_checkpoint is the path of your checkpoint file;
-output_graph is the path where you want to put the frozen graph;
-output_node_names is the name of the output node you want to keep in your graph.
-You can try this code to print and find the node you want:
->>> print([n.name for n in tf.get_default_graph().as_graph_def().node])
-
-Note that we have now obtained frozen_graph.pb, with both the graph definition and the weight information in one file.
-
-3.Model Conversion
------------------
-Thirdly, we convert the model to ONNX format using onnx-tensorflow, via tensorflow_graph_to_onnx_model from the onnx-tensorflow API (documentation available at https://github.com/onnx/onnx-tensorflow/blob/master/onnx_tf/doc/API.md):
->>> import tensorflow as tf
->>> from onnx_tf.frontend import tensorflow_graph_to_onnx_model
-
->>> with tf.gfile.GFile("frozen_graph.pb", "rb") as f:
->>>     graph_def = tf.GraphDef()
->>>     graph_def.ParseFromString(f.read())
->>>     onnx_model = tensorflow_graph_to_onnx_model(graph_def,
->>>                                                 "output/bias_add",
->>>                                                 opset=6)
-
->>>     file = open("mnist.onnx", "wb")
->>>     file.write(onnx_model.SerializeToString())
->>>     file.close()
-
-Then you will get the first node info:
->>> input: "cnn1/kernel"
->>> output: "cnn1/kernel/read"
->>> name: "cnn1/kernel/read"
->>> op_type: "Identity"
-
-4.Inference using Backend (this part of onnx-tf is still under implementation!)
--------------------------------------------------------------------
-In this tutorial, we continue our demonstration by performing inference with the obtained ONNX model. Here, we exported an image representing a handwritten 7 and stored the numpy array as image.npz. Using the onnx-tf backend, we will classify this image with the converted ONNX model.
->>> import onnx
->>> import numpy as np
->>> from onnx_tf.backend import prepare
-
->>> model = onnx.load('mnist.onnx')
->>> tf_rep = prepare(model)
->>> # Image Path
->>> img = np.load("./assets/image.npz", allow_pickle=True)
->>> output = tf_rep.run(img.reshape([1, 784]))
->>> print("The digit is classified as ", np.argmax(output))
-
-You will get this information in your console:
->>> The digit is classified as 7
-
-"""
-
-import time
-
-import numpy as np
-import tensorflow as tf
-from tensorflow.python.tools.freeze_graph import freeze_graph as _freeze_graph
-
-import onnx
-import tensorlayer as tl
-from onnx_tf.backend import prepare
-from onnx_tf.frontend import tensorflow_graph_to_onnx_model
-
-tf.logging.set_verbosity(tf.logging.DEBUG)
-tl.logging.set_verbosity(tl.logging.DEBUG)
-
-
-def generate_graph_and_checkpoint(graph_output_path, checkpoint_output_path):
-    """
-    Reimplementation of the official TensorFlow MNIST CNN tutorials, generating the graph and checkpoint for this model:
-    - https://www.tensorflow.org/versions/r0.8/tutorials/mnist/pros/index.html
-    - https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/image/mnist/convolutional.py
-
-    - For simplified CNN layer see "Convolutional layer (Simplified)"
-
-    Parameters
-    -----------
-    graph_output_path : string
-        the path where you want to save the graph.
-    checkpoint_output_path : string
-        the path where you want to save the checkpoint.
-
-    References
-    -----------
-    - `onnx-tf exporting tutorial `__
-
-    """
-    X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
-
-    sess = tf.InteractiveSession()
-
-    batch_size = 128
-    x = tf.placeholder(tf.float32, shape=[batch_size, 28, 28, 1])  # [batch_size, height, width, channels]
-    y_ = tf.placeholder(tf.int64, shape=[batch_size])
-
-    net = tl.layers.InputLayer(x, name='input')
-
-    # Simplified conv API (the same as the layers above)
-    net = tl.layers.Conv2d(net, 32, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn1')
-    net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), padding='SAME', name='pool1')
-    net = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn2')
-    net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), padding='SAME', name='pool2')
-    # end of conv
-    net = tl.layers.FlattenLayer(net, name='flatten')
-    net = tl.layers.DropoutLayer(net, keep=0.5, name='drop1')
-    net = tl.layers.DenseLayer(net, 256, act=tf.nn.relu, name='relu1')
-    net = tl.layers.DropoutLayer(net, keep=0.5, name='drop2')
-    net = tl.layers.DenseLayer(net, 10, act=None, name='output')
-
-    y = net.outputs
-
-    print([n.name for n in tf.get_default_graph().as_graph_def().node])
-
-    # To string Graph
-    with open(graph_output_path, "wb") as file:
-        graph = tf.get_default_graph().as_graph_def(add_shapes=True)
-        file.write(graph.SerializeToString())
-
-    cost = tl.cost.cross_entropy(y, y_, 'cost')
-
-    correct_prediction = tf.equal(tf.argmax(y, 1), y_)
-    acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
-
-    # train
-    n_epoch = 200
-    learning_rate = 0.0001
-    print_freq = 10
-
-    train_params = net.all_params
-    train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost, var_list=train_params)
-
-    tl.layers.initialize_global_variables(sess)
-    net.print_params()
-    net.print_layers()
-
-    print('   learning_rate: %f' % learning_rate)
-    print('   batch_size: %d' % batch_size)
-
-    for epoch in range(n_epoch):
-        start_time = time.time()
-        for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
-            feed_dict = {x: X_train_a, y_: y_train_a}
-            feed_dict.update(net.all_drop)  # enable noise layers
-            sess.run(train_op, feed_dict=feed_dict)
-        # Save the checkpoint every 10 epochs
-        if epoch % 10 == 0:
-            tl.files.save_ckpt(sess, mode_name='model.ckpt', save_dir=checkpoint_output_path, printable=True)
-        if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-            print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
-            train_loss, train_acc, n_batch = 0, 0, 0
-            for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
-                dp_dict = tl.utils.dict_to_one(net.all_drop)  # disable noise layers
-                feed_dict = {x: X_train_a, y_: y_train_a}
-                feed_dict.update(dp_dict)
-                err, ac = sess.run([cost, acc], feed_dict=feed_dict)
-                train_loss += err
-                train_acc += ac
-                n_batch += 1
-            print("   train loss: %f" % (train_loss / n_batch))
-            print("   train acc: %f" % (train_acc / n_batch))
-            val_loss, val_acc, n_batch = 0, 0, 0
-            for X_val_a, y_val_a in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=True):
-                dp_dict = tl.utils.dict_to_one(net.all_drop)  # disable noise layers
-                feed_dict = {x: X_val_a, y_: y_val_a}
-                feed_dict.update(dp_dict)
-                err, ac = sess.run([cost, acc], feed_dict=feed_dict)
-                val_loss += err
-                val_acc += ac
-                n_batch += 1
-            print("   val loss: %f" % (val_loss / n_batch))
-            print("   val acc: %f" % (val_acc / n_batch))
-
-    # Evaluation
-    print('Evaluation')
-    test_loss, test_acc, n_batch = 0, 0, 0
-    for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=True):
-        dp_dict = tl.utils.dict_to_one(net.all_drop)  # disable noise layers
-        feed_dict = {x: X_test_a, y_: y_test_a}
-        feed_dict.update(dp_dict)
-        err, ac = sess.run([cost, acc], feed_dict=feed_dict)
-        test_loss += err
-        test_acc += ac
-        n_batch += 1
-    print("   test loss: %f" % (test_loss / n_batch))
-    print("   test acc: %f" % (test_acc / n_batch))
-
-
-def freeze_graph(graph_path, checkpoint_path, output_path, end_node_names, is_binary_graph):
-    """Reimplementation of the TensorFlow official freeze_graph function to freeze the graph and checkpoint together:
-
-    Parameters
-    -----------
-    graph_path : string
-        the path where your graph file is saved.
-    checkpoint_path : string
-        the path where your checkpoint is saved.
-    output_path : string
-        the path where you want to save the output protobuf file.
-    end_node_names : string
-        the name of the end node in your graph that you want to keep in the output protobuf file.
-    is_binary_graph : boolean
-        whether your graph file is in binary format.
-
-    References
-    ----------
-    - `onnx-tf exporting tutorial `__
-    - `tensorflow freeze_graph `
-    """
-    _freeze_graph(
-        input_graph=graph_path, input_saver='', input_binary=is_binary_graph, input_checkpoint=checkpoint_path,
-        output_graph=output_path, output_node_names=end_node_names, restore_op_name='save/restore_all',
-        filename_tensor_name='save/Const:0', clear_devices=True, initializer_nodes=None
-    )
-
-
-def convert_model_to_onnx(frozen_graph_path, end_node_names, onnx_output_path):
-    """Reimplementation of the tensorflow-onnx official tutorial: convert the protobuf file to an ONNX file.
-
-    Parameters
-    -----------
-    frozen_graph_path : string
-        the path where your frozen graph file is saved.
-    end_node_names : string
-        the name of the end node in your graph that you want to keep in the output ONNX file.
-    onnx_output_path : string
-        the path where you want to save the ONNX file.
-
-    References
-    -----------
-    - `onnx-tf exporting tutorial `
-    """
-    with tf.gfile.GFile(frozen_graph_path, "rb") as f:
-        graph_def = tf.GraphDef()
-        graph_def.ParseFromString(f.read())
-        onnx_model = tensorflow_graph_to_onnx_model(graph_def, end_node_names, opset=6)
-        file = open(onnx_output_path, "wb")
-        file.write(onnx_model.SerializeToString())
-        file.close()
-
-
-def convert_onnx_to_model(onnx_input_path):
-    """Reimplementation of the tensorflow-onnx official tutorial: convert the ONNX file back to a runnable model.
-
-    Parameters
-    -----------
-    onnx_input_path : string
-        the path where you saved the ONNX file.
-
-    References
-    -----------
-    - `onnx-tf exporting tutorial `__
-    """
-    model = onnx.load(onnx_input_path)
-    tf_rep = prepare(model)
-    # Image Path
-    img = np.load("./assets/image.npz", allow_pickle=True)
-    output = tf_rep.run(img.reshape([1, 784]))
-    print("The digit is classified as ", np.argmax(output))
-
-
-if __name__ == '__main__':
-
-    # 1. Train the CNN network and output the graph and checkpoints
-    generate_graph_and_checkpoint(graph_output_path='graph.proto', checkpoint_output_path='./')
-
-    # 2. Freeze the graph with checkpoints
-    freeze_graph(
-        graph_path='graph.proto', is_binary_graph=True, checkpoint_path='model.ckpt', output_path='frozen_graph.pb',
-        end_node_names='output/bias_add'
-    )
-
-    # 3. Convert the TensorFlow protobuf file to an ONNX file
-    convert_model_to_onnx(
-        frozen_graph_path='frozen_graph.pb', end_node_names='output/bias_add', onnx_output_path='mnist.onnx'
-    )
-
-    # 4. Convert the ONNX file back to a model
-    # the following step is not working so far, as the tensorflow-onnx project has a bug at the time of writing.
-    # convert_onnx_to_model(onnx_input_path='mnist.onnx')
diff --git a/tensorlayer/__init__.py b/tensorlayer/__init__.py
index f89eebfff..3fab2fb2f 100644
--- a/tensorlayer/__init__.py
+++ b/tensorlayer/__init__.py
@@ -2,6 +2,12 @@
 # -*- coding: utf-8 -*-
 """Deep learning and Reinforcement learning library for Researchers and Engineers"""
 
+# import backend
+from .backend import *
+# from .backend import ops
+# import dataflow
+# from .dataflow import *
+
 import os
 from distutils.version import LooseVersion
 
diff --git a/tensorlayer/activation.py b/tensorlayer/activation.py
index e2d3ac3b9..fcdd52fe1 100644
--- a/tensorlayer/activation.py
+++ b/tensorlayer/activation.py
@@ -19,7 +19,6 @@
     'htanh',
     'hard_tanh',
     'pixel_wise_softmax',
-    'mish',
 ]
 
 
@@ -48,7 +47,7 @@ def ramp(x, v_min=0, v_max=1, name=None):
     return tf.clip_by_value(x, clip_value_min=v_min, clip_value_max=v_max, name=name)
 
 
-# @deprecated(date="2018-09-30", instructions="This API is deprecated. Please use as `tf.nn.leaky_relu`")
+# @deprecated(date="2018-09-30", instructions="This API is deprecated. Please use as `tf.ops.leaky_relu`")
 def leaky_relu(x, alpha=0.2, name="leaky_relu"):
     """leaky_relu can be used through its shortcut: :func:`tl.act.lrelu`.
 
@@ -98,7 +97,7 @@ def leaky_relu6(x, alpha=0.2, name="leaky_relu6"):
     This activation function is a modified version :func:`leaky_relu` introduced by the following paper:
     `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__
 
-    This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6` introduced by the following paper:
+    This activation function also follows the behaviour of the activation function :func:`tf.ops.relu6` introduced by the following paper:
     `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] `__
 
     The function return the following results:
@@ -145,7 +144,7 @@ def leaky_twice_relu6(x, alpha_low=0.2, alpha_high=0.2, name="leaky_relu6"):
     This activation function is a modified version :func:`leaky_relu` introduced by the following paper:
     `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__
 
-    This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6` introduced by the following paper:
+    This activation function also follows the behaviour of the activation function :func:`tf.ops.relu6` introduced by the following paper:
     `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] `__
 
     This function push further the logic by adding `leaky` behaviour both below zero and above six.
@@ -278,7 +277,7 @@ def grad(dy):
 #     A ``Tensor`` in the same type as ``x``.
 #
 #     """
-#     tao = tf.nn.sigmoid(x)
+#     tao = tf.ops.sigmoid(x)
 #     def grad():
 #         return tao * (1 - tao)
 #     return tf.sign(x), grad
@@ -306,7 +305,7 @@ def hard_tanh(x, name='htanh'):
     return tf.clip_by_value(x, -1, 1, name=name)
 
 
-@deprecated(date="2018-06-30", instructions="This API will be deprecated soon as tf.nn.softmax can do the same thing")
+@deprecated(date="2018-06-30", instructions="This API will be deprecated soon as tf.ops.softmax can do the same thing")
 def pixel_wise_softmax(x, name='pixel_wise_softmax'):
     """Return the softmax outputs of images, every pixels have multiple label, the sum of a pixel is 1.
@@ -340,25 +339,6 @@ def pixel_wise_softmax(x, name='pixel_wise_softmax'): return tf.nn.softmax(x) -def mish(x): - """Mish activation function. - - Reference: [Mish: A Self Regularized Non-Monotonic Neural Activation Function .Diganta Misra, 2019] - - Parameters - ---------- - x : Tensor - input. - - Returns - ------- - Tensor - A ``Tensor`` in the same type as ``x``. - - """ - return x * tf.math.tanh(tf.math.softplus(x)) - - # Alias lrelu = leaky_relu lrelu6 = leaky_relu6 diff --git a/tensorlayer/backend/__init__.py b/tensorlayer/backend/__init__.py new file mode 100644 index 000000000..9167b0131 --- /dev/null +++ b/tensorlayer/backend/__init__.py @@ -0,0 +1,5 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +# load ops +from .ops import * diff --git a/tensorlayer/backend/ops/__init__.py b/tensorlayer/backend/ops/__init__.py new file mode 100644 index 000000000..b49fe7326 --- /dev/null +++ b/tensorlayer/backend/ops/__init__.py @@ -0,0 +1,115 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +# load nn ops +from .load_backend import padding_format +from .load_backend import preprocess_1d_format +from .load_backend import preprocess_2d_format +from .load_backend import preprocess_3d_format +from .load_backend import nchw_to_nhwc +from .load_backend import nhwc_to_nchw +from .load_backend import relu +from .load_backend import relu6 +from .load_backend import leaky_relu +from .load_backend import softplus +from .load_backend import tanh +from .load_backend import sigmoid +from .load_backend import softmax +from .load_backend import bias_add +from .load_backend import conv1d +from .load_backend import conv2d +from .load_backend import conv3d +from .load_backend import lrn +from .load_backend import moments +from .load_backend import max_pool +from .load_backend import avg_pool +from .load_backend import max_pool3d +from .load_backend import avg_pool3d +from .load_backend import pool +from .load_backend import depthwise_conv2d +from .load_backend import conv1d_transpose +from .load_backend import conv2d_transpose +from .load_backend import conv3d_transpose + +from .load_backend import ReLU +from .load_backend import ReLU6 +from .load_backend import LeakyReLU +from .load_backend import Softplus +from .load_backend import Tanh +from .load_backend import Sigmoid +from .load_backend import Softmax +from .load_backend import Conv1D +from .load_backend import Conv2D +from .load_backend import Conv3D +from .load_backend import BiasAdd +from .load_backend import MaxPool +from .load_backend import AvgPool +from .load_backend import Dropout +from .load_backend import BatchNorm +from .load_backend import DepthwiseConv2d + +# load ops +from .load_backend import Variable +from .load_backend import matmul +from .load_backend import add +from .load_backend import dtypes +from .load_backend import minimum +from .load_backend import reshape +from .load_backend import concat +from .load_backend import convert_to_tensor +from .load_backend import sqrt +from .load_backend import reduce_mean +from .load_backend import reduce_min +from .load_backend import reduce_max +from .load_backend import pad +from .load_backend import stack +from .load_backend import meshgrid +from .load_backend import range +from .load_backend import expand_dims +from .load_backend import tile +from .load_backend import cast +from .load_backend import transpose +from .load_backend import gather_nd +from .load_backend import clip_by_value +from .load_backend import split +from .load_backend import get_tensor_shape +from .load_backend import 
set_context
+from .load_backend import resize
+from .load_backend import floor
+from .load_backend import gather
+from .load_backend import linspace
+from .load_backend import slice
+from .load_backend import add_n

+# dtype
+from .load_backend import (DType, float16, float32, float64, int8, int16, int32, int64, uint8, uint16, uint32, uint64)
+# initializers
+from .load_backend import (zeros, ones, constant, random_uniform, random_normal, truncated_normal, he_normal)
+# backend
+from .load_backend import BACKEND
+from .load_backend import BACKEND_VERSION
+
+from .load_backend import Reshape
+from .load_backend import ReduceSum
+from .load_backend import ReduceMax
+from .load_backend import ReduceMean
+from .load_backend import OneHot
+from .load_backend import L2Normalize
+from .load_backend import EmbeddingLookup
+from .load_backend import NCELoss
+from .load_backend import Not_equal
+from .load_backend import Cast
+from .load_backend import ExpandDims
+from .load_backend import Count_nonzero
+from .load_backend import FlattenReshape
+from .load_backend import Transpose
+from .load_backend import MatMul
+from .load_backend import Tile
+from .load_backend import Concat
+from .load_backend import ZeroPadding1D
+from .load_backend import ZeroPadding2D
+from .load_backend import ZeroPadding3D
+from .load_backend import Stack
+from .load_backend import Unstack
+from .load_backend import Sign
+from .load_backend import Resize
diff --git a/tensorlayer/backend/ops/dragon_backend.py b/tensorlayer/backend/ops/dragon_backend.py
new file mode 100644
index 000000000..2b7b8a03b
--- /dev/null
+++ b/tensorlayer/backend/ops/dragon_backend.py
@@ -0,0 +1,989 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, division, print_function
+
+import numpy as np
+import dragon as D
+
+from dragon.core.eager import context
+from dragon.core.ops import init_ops
+from dragon.core.ops import vision_ops
+
+_dtypeDict = ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64']
+# TODO NotImplemented
+DType = None
+float16 = 'float16'
+float32 = 'float32'
+float64 = 'float64'
+int8 = 'int8'
+int16 = 'int16'
+int32 = 'int32'
+int64 = 'int64'
+uint8 = 'uint8'
+uint16 = 'uint16'
+uint32 = 'uint32'
+uint64 = 'uint64'
+
+# isinstance input output
+# TODO NotImplemented
+# TensorLike = None
+
+
+def _getter(init_fn, **kwargs):
+    """Return a named eager tensor."""
+    with context.eager_mode():
+        value = init_fn(**kwargs)
+    value._name = kwargs.get('name', value.id)
+    return value
+
+
+def set_context(**kwargs):
+    raise Exception("When using the Dragon backend, you don't need to set a context.")
+
+
+def get_tensor_shape(x):
+    return x.shape
+
+
+# initializers
+def zeros(shape, dtype='float32'):
+    """
+    Creates a tensor with all elements set to zero.
+
+    Parameters
+    ----------
+    shape : A list of integers
+        a tuple of integers, or a 1-D Tensor of type int32.
+    dtype : tensor
+        The DType of an element in the resulting Tensor
+
+    Returns
+    -------
+    A Tensor with all elements set to zero.
+
+    """
+    return _getter(
+        init_ops.fill,
+        value=0,
+        shape=shape,
+        dtype=dtype,
+    )
+
+
+def ones(shape, dtype='float32'):
+    """
+    Creates a tensor with all elements set to one.
+
+    Parameters
+    ----------
+    shape : A list of integers
+        a tuple of integers, or a 1-D Tensor of type int32.
+    dtype : tensor
+        The DType of an element in the resulting Tensor
+
+    Returns
+    -------
+    A Tensor with all elements set to one.
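+
+    Examples
+    --------
+    A minimal illustrative sketch (assumes the Dragon backend is active; `zeros` is the companion helper defined above):
+
+    >>> x = ones((2, 3))                   # 2x3 tensor filled with 1.0
+    >>> y = zeros((2, 3), dtype='int32')   # 2x3 tensor filled with 0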
+ + """ + return _getter( + init_ops.fill, + value=1, + shape=shape, + dtype=dtype, + ) + + +def constant(value, shape, dtype='float32'): + """ + Creates a constant tensor from a tensor-like object. + + Parameters + ---------- + value : list + A constant value (or list) of output type dtype. + dtype : tensor + The type of the elements of the resulting tensor. + shape : tuple + Optional dimensions of resulting tensor. + + Returns + ------- + A Constant Tensor. + + """ + # shape = shape[::-1] + return _getter( + init_ops.fill, + value=value, + shape=shape, + dtype=dtype, + ) + + +def random_uniform(shape, minval=0, maxval=None, dtype='float32', seed=None): + """ + Outputs random values from a uniform distribution. + + Parameters + ---------- + shape : tuple + A 1-D integer Tensor or Python array. The shape of the output tensor. + minval : int + The lower bound on the range of random values to generate (inclusive). Defaults to 0. + maxval : int + The upper bound on the range of random values to generate (exclusive). Defaults to 1 if dtype is floating point. + dtype : tensor + The type of the output: float16, float32, float64, int32, or int64. + seed : int + Used in combination with dragon.random.set_seed to create a reproducible sequence of tensors across multiple calls. + Returns + ------- + A tensor of the specified shape filled with random uniform values. + + """ + return _getter(init_ops.random_uniform, low=minval, high=maxval, shape=shape, dtype=dtype) + + +def random_normal(shape, mean=0.0, stddev=1.0, dtype='float32', seed=None): + """ + Outputs random values from a normal distribution. + + Parameters + ---------- + shape : tuple + A 1-D integer Tensor or Python array. The shape of the output tensor. + mean : float + The mean of the normal distribution + stddev : float + The standard deviation of the normal distribution. + dtype : tensor + The type of the output. + seed : A Python integer + Used to create a random seed for the distribution + + Returns + ------- + A tensor of the specified shape filled with random normal values. + + """ + return _getter( + init_ops.random_normal, + mean=mean, + std=stddev, + shape=shape, + dtype=dtype, + ) + + +def truncated_normal(shape, mean=0.0, stddev=1.0, dtype='float32', seed=None): + """ + Outputs random values from a truncated normal distribution. + + Parameters + ---------- + shape : tuple + A 1-D integer Tensor or Python array. The shape of the output tensor. + mean : float + The mean of the normal distribution + stddev : float + The standard deviation of the normal distribution. + dtype : tensor + The type of the output. + seed : A Python integer + Used to create a random seed for the distribution + + Returns + ------- + A tensor of the specified shape filled with random truncated normal values. + + """ + return _getter( + init_ops.truncated_normal, + mean=mean, + std=stddev, + shape=shape, + dtype=dtype, + ) + + +def he_normal(shape, dtype, seed=None): + """ + He normal initializer. + + Parameters + ---------- + seed : A Python integer. + Used to seed the random generator. + shape : tuple + A 1-D integer Tensor or Python array. The shape of the output tensor. + dtype : tensor + The type of the output. + + Returns + ------- + A tensor of the specified shape filled with he normal values. + """ + # shape = shape[::-1] + raise NotImplementedError("He_Normal is not implemented") + + +def Variable(initial_value, name, trainable=None): + """ + Creates a new variable with value initial_value. 
+ + Parameters + ---------- + initial_value : tensor + A Tensor, or Python object convertible to a Tensor + name : str + Optional name for the variable. Defaults to 'Variable' and gets uniquified automatically. + Returns + ------- + Variable + """ + return D.Tensor(name=name, shape=initial_value) + + +class MatMul(object): + + def __init__(self): + pass + + def __call__(self, a, b): + inputs = [a, b] + return D.math.matmul(inputs) + + +def matmul(a, b): + """ + Multiplies matrix a by matrix b, producing a * b. + + Parameters + ---------- + a : tensor + type float16, float32, float64, int32, complex64, complex128 and rank > 1. + b : tensor + with same type and rank as a. + + Returns + ------- + A Tensor of the same type as a and b + """ + inputs = [a, b] + return D.math.matmul(inputs) + + +def add(value, bias): + """ + Returns x + y element-wise. + + Parameters + ---------- + value : tensor. + Must be one of the following types: bfloat16, half, float32, float64, + uint8, int8, int16, int32, int64, complex64, complex128, string. + bias : tensor + Must have the same type as a + name : str + A name for the operation + + Returns + ------- + A Tensor. Has the same type as a. + """ + + inputs = [value, bias] + return D.math.add(inputs) + + +def dtypes(dt): + """ + Data dtypes. + + Parameters + ---------- + dt : string + It could be 'uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16', + 'int32', 'int64', 'float16', 'float32', 'float64', 'DType'. + + Returns + ------- + Data dtypes + """ + if dt not in _dtypeDict: + raise Exception("Unsupported dtype: {}".format(dt)) + return dt + + +def minimum(x, y): + """ + Returns the min of x and y (i.e. x < y ? x : y) element-wise. + + Parameters + ---------- + x : tensor. + Must be one of the following types: bfloat16, half, float32, float64, int32, int64. + y : A Tensor. + Must have the same type as x. + name : str + A name for the operation (optional). + + Returns + ------- + A Tensor. Has the same type as x + """ + inputs = [x, y] + return D.math.minimum(inputs) + + +class FlattenReshape(object): + + def __init__(self): + pass + + def __call__(self, inputs): + dim = 1 + for d in get_tensor_shape(inputs)[1:]: + dim *= d + return D.reshape(inputs, [-1, dim]) + + +class Reshape(object): + + def __init__(self, shape): + self.shape = shape + + def __call__(self, tensor): + return D.reshape(tensor, shape=self.shape) + + +def reshape(tensor, shape): + """ + Reshapes a tensor. + + Parameters + ---------- + tensor : tensor + A Tensor. + shape : tensor + Defines the shape of the output tensor. + Returns + ------- + A Tensor. Has the same type as tensor + """ + return D.reshape(tensor, shape=shape) + + +class Concat(object): + + def __init__(self, axis): + super(Concat, self).__init__() + self.axis = axis + + def __call__(self, values): + return D.concat(values=values, axis=self.axis) + + +def concat(values, axis): + """ + Concatenates tensors along one dimension. + + Parameters + ---------- + values : list + A list of Tensor objects or a single Tensor + axis : int + 0-D int32 Tensor. Dimension along which to concatenate + Returns + ------- + A Tensor resulting from concatenation of the input tensors. + """ + return D.concat(values, axis=axis) + + +def convert_to_tensor(value, dtype=None): + """ + Converts the given value to a Tensor. + + Parameters + ---------- + value : object + An object whose type has a registered Tensor conversion function. + dtype : optional + Optional element type for the returned tensor. 
If missing, the type is inferred from the type of value.
+
+    Returns
+    -------
+    A Tensor based on value.
+    """
+    return D.Tensor.convert_to(value, dtype)
+
+
+def sqrt(x):
+    """
+    Computes square root of x element-wise.
+
+    Parameters
+    ----------
+    x : tensor
+        Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128.
+
+    Returns
+    -------
+    A Tensor. Has the same type as x.
+    """
+    return D.math.sqrt(x)
+
+
+class ReduceSum(object):
+
+    def __init__(self, axis):
+        pass
+
+    def construct(self, input):
+        pass
+
+
+class ReduceMean(object):
+
+    def __init__(self, axis):
+        if axis == [1, 2]:
+            self.data_format = 'NHWC'
+        elif axis == [2, 3]:
+            self.data_format = 'NCHW'
+        else:
+            raise ValueError("`data_format` should have one of the following values: [`channels_last`, `channels_first`]")
+
+    def __call__(self, inputs):
+        return vision_ops.pool2d(
+            inputs,
+            kernel_shape=1,
+            strides=1,
+            pads=0,
+            mode='AVG',
+            global_pooling=True,
+            data_format=self.data_format,
+        )
+
+
+def reduce_mean(input_tensor, axis=None):
+    """
+    Computes the mean of elements across dimensions of a tensor.
+
+    Parameters
+    ----------
+    input_tensor : tensor
+        The tensor to reduce. Should have numeric type.
+    axis : int
+        The dimensions to reduce. If None (the default), reduces all dimensions.
+        Must be in the range [-rank(input_tensor), rank(input_tensor)).
+    name : str
+        A name for the operation (optional).
+
+    Returns
+    -------
+    The reduced tensor.
+    """
+
+    return D.mean(input_tensor, axes=axis)
+
+
+class ReduceMax(object):
+
+    def __init__(self, axis):
+        if axis == [1, 2]:
+            self.data_format = 'NHWC'
+        elif axis == [2, 3]:
+            self.data_format = 'NCHW'
+        else:
+            raise ValueError("`data_format` should have one of the following values: [`channels_last`, `channels_first`]")
+
+    def __call__(self, inputs):
+        return vision_ops.pool2d(
+            inputs, kernel_shape=1, strides=1, pads=0, mode='MAX', global_pooling=True, data_format=self.data_format
+        )
+
+
+def reduce_max(input_tensor, axis=None):
+    """
+    Computes the maximum of elements across dimensions of a tensor.
+
+    Parameters
+    ----------
+    input_tensor : tensor
+        The tensor to reduce. Should have real numeric type.
+    axis : int
+        The dimensions to reduce. If None (the default), reduces all dimensions.
+        Must be in the range [-rank(input_tensor), rank(input_tensor)).
+    name : str
+        A name for the operation (optional).
+
+    Returns
+    -------
+    The reduced tensor.
+    """
+
+    return D.max(input_tensor, axis)
+
+
+def reduce_min(input_tensor, axis=None):
+    """
+    Computes the minimum of elements across dimensions of a tensor.
+
+    Parameters
+    ----------
+    input_tensor : tensor
+        The tensor to reduce. Should have real numeric type.
+    axis : int
+        The dimensions to reduce. If None (the default), reduces all dimensions.
+        Must be in the range [-rank(input_tensor), rank(input_tensor)).
+    name : str
+        A name for the operation (optional).
+
+    Returns
+    -------
+    The reduced tensor.
+    """
+    return D.min(input_tensor, axis)
+
+
+def pad(tensor, paddings, mode='CONSTANT', constant_values=0):
+    """
+    Pads a tensor.
+
+    Parameters
+    ----------
+    tensor : tensor
+        A Tensor.
+    paddings : tuple
+        A tuple of type int32.
+    mode : str
+        One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
+    constant_values : int
+        In "CONSTANT" mode, the scalar pad value to use. Must be same type as tensor.
+
+    Returns
+    -------
+    A Tensor. Has the same type as tensor.
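+
+    Examples
+    --------
+    A minimal illustrative sketch (assumes the Dragon backend is active and that `paddings` follows the
+    (before, after) per-dimension convention that the wrapper forwards to `dragon.pad`):
+
+    >>> x = ones((2, 2))
+    >>> y = pad(x, paddings=((1, 1), (1, 1)), mode='CONSTANT', constant_values=0)  # zero-pads x to 4x4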
+ """ + if mode not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']: + raise Exception("Unsupported mode: {}".format(mode)) + if mode == 'SYMMETRIC': + mode = 'EDGE' + outputs = D.pad(tensor, pads=paddings, mode=mode, value=constant_values) + return outputs + + +class Unstack(object): + + def __init__(self, axis, num=None): + self.axis = axis + self.num = num + + def __call__(self, values): + raise NotImplementedError + + +class Stack(object): + + def __init__(self, axis): + self.axis = axis + + def __call__(self, values): + return D.stack(values, axis=self.axis) + + +def stack(values, axis=0): + """ + Stacks a list of rank-R tensors into one rank-(R+1) tensor. + + Parameters + ---------- + values : list + A list of Tensor objects with the same shape and type. + axis : int + An int. The axis to stack along. Defaults to the first dimension. + Negative values wrap around, so the valid range is [-(R+1), R+1). + + Returns + ------- + A stacked Tensor with the same type as values. + """ + return D.stack(values, axis=axis) + + +def meshgrid(x, y): + """ + Broadcasts parameters for evaluation on an N-D grid. + + Parameters + ---------- + x : tensor + Tensors with rank 1. + y : tensor + Tensors with rank 1. + + Returns + ------- + A list of N Tensors with rank N. + """ + + pass + + +def range(start, limit=None, delta=1, dtype=None): + """ + Creates a sequence of numbers. + + Parameters + ---------- + start : tensor + A 0-D Tensor (scalar). Acts as first entry in the range if limit is not None; + otherwise, acts as range limit and first entry defaults to 0. + limit : tensor + A 0-D Tensor (scalar). Upper limit of sequence, exclusive. If None, + defaults to the value of start while the first entry of the range defaults to 0. + delta : tensor + A 0-D Tensor (scalar). Number that increments start. Defaults to 1. + dtype : type + The type of the elements of the resulting tensor. + + Returns + ------- + An 1-D Tensor of type dtype. + """ + if dtype is None: + dtype = 'int32' + if limit is None: + outputs = D.arange(start=0, stop=start, step=delta, dtype=dtype) + else: + outputs = D.arange(start, stop=limit, step=delta, dtype=dtype) + return outputs + + +class ExpandDims(object): + + def __init__(self, axis): + pass + + def construct(self, input): + pass + + +def expand_dims(input, axis): + """ + Inserts a dimension of 1 into a tensor's shape. + + Parameters + ---------- + input : tensor + A Tensor. + axis : int + 0-D (scalar). Specifies the dimension index at which to expand the shape of input. + Must be in the range [-rank(input) - 1, rank(input)]. + + Returns + ------- + A Tensor with the same data as input, but its shape has an additional dimension of size 1 added. + """ + + return D.expand_dims(input, axis=axis) + + +class Tile(object): + + def __init__(self): + pass + + def __call__(self, input, multiples): + return D.tile(input, multiples) + + +def tile(input, multiples): + """ + Constructs a tensor by tiling a given tensor. + + Parameters + ---------- + input : tensor + A Tensor. 1-D or higher. + multiples : tensor + Must be one of the following types: int32, int64. 1-D. + Length must be the same as the number of dimensions in input + + Returns + ------- + A Tensor. Has the same type as input. + """ + return D.tile(input, multiples) + + +class Cast(object): + + def __init__(self, dtype): + pass + + def __call__(self, input): + pass + + +def cast(x, dtype): + """ + Casts a tensor to a new type. + + Parameters + ---------- + x : tensor + A Tensor or SparseTensor or IndexedSlices of numeric type. 
It could be uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64.
+    dtype : dtype
+        The destination type. The list of supported dtypes is the same as x
+
+    Returns
+    -------
+    A Tensor or SparseTensor or IndexedSlices with same shape as x and same type as dtype.
+    """
+    return D.cast(x, dtype=dtype)
+
+
+class Transpose(object):
+
+    def __init__(self, perm, conjugate=False):
+        self.perm = perm
+        if conjugate:
+            raise NotImplementedError("The conjugate parameter is not supported")
+
+    def __call__(self, a):
+        return D.transpose(a, self.perm)
+
+
+def transpose(a, perm=None, conjugate=False):
+    """
+    Transposes a.
+
+    Parameters
+    ----------
+    a : tensor
+        A Tensor.
+    perm : int
+        A permutation of the dimensions of a.
+    conjugate : bool
+        Setting it to True is mathematically equivalent to conj(transpose(input)).
+
+    Returns
+    -------
+    A transposed Tensor.
+    """
+
+    return D.transpose(a, perm=perm)
+
+
+def gather_nd(params, indices, batch_dims=0):
+    """
+    Gather slices from params into a Tensor with shape specified by indices.
+
+    Parameters
+    ----------
+    params : tensor
+        The tensor from which to gather values.
+    indices : tensor
+        Must be one of the following types: int32, int64. Index tensor.
+    batch_dims : int
+        An integer or a scalar 'Tensor'. The number of batch dimensions.
+
+    Returns
+    -------
+    A Tensor. Has the same type as params.
+    """
+
+    pass
+
+
+def clip_by_value(t, clip_value_min, clip_value_max):
+    """
+    Clips tensor values to a specified min and max.
+
+    Parameters
+    ----------
+    t : tensor
+        A Tensor or IndexedSlices
+    clip_value_min : tensor
+        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The minimum value to clip by
+    clip_value_max : tensor
+        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The maximum value to clip by
+
+    Returns
+    -------
+    A clipped Tensor or IndexedSlices.
+    """
+
+    pass
+
+
+def split(value, num_or_size_splits, axis=0, num=None):
+    """
+    Splits a tensor into sub tensors.
+
+    Parameters
+    ----------
+    value : tensor
+        The Tensor to split.
+    num_or_size_splits : list
+        Either an integer indicating the number of splits along split_dim or a 1-D integer Tensor or
+        Python list containing the sizes of each output tensor along split_dim.
+    axis : int
+        The dimension along which to split. Must be in the range [-rank(value), rank(value)). Defaults to 0.
+    num : int
+        Used to specify the number of outputs when it cannot be inferred from the shape of size_splits.
+
+    Returns
+    -------
+    Tensor objects resulting from splitting value.
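+
+    Examples
+    --------
+    Intended usage, shown for reference only (the Dragon implementation below is still a stub,
+    so the call is left commented out):
+
+    >>> x = ones((6, 4))
+    >>> # a, b, c = split(x, 3, axis=0)  # would yield three tensors of shape (2, 4)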
+    """
+    pass
+
+
+def floor(x):
+    return D.math.floor(x)
+
+
+def gather(params, indices):
+    raise NotImplementedError
+
+
+def linspace(start, stop, num):
+    return D.linspace(start, stop, num)
+
+
+def slice(inputs, starts, sizes):
+    return D.slice(inputs, starts, sizes)
+
+
+def add_n(inputs):
+    raise NotImplementedError
+
+
+class OneHot(object):
+
+    def __init__(self, axis=-1, depth=1, on_value=1.0, off_value=0.0, dtype='float32'):
+        self.depth = depth
+        self.dtype = dtype
+
+    def __call__(self, indices):
+        outputs = np.zeros(shape=(indices.shape[0], self.depth))
+        for i in np.arange(indices.shape[0]):
+            outputs[int(i)][int(indices[int(i)].get_value())] = 1
+        outputs = D.constant(outputs, dtype=self.dtype)
+        return outputs
+
+
+class L2Normalize(object):
+
+    def __init__(self, axis=None, epsilon=1e-12):
+        super(L2Normalize, self).__init__()
+        pass
+
+    def __call__(self, input, *args, **kwargs):
+        pass
+
+
+class EmbeddingLookup(object):
+
+    def __init__(self, max_norm=None):
+        self.max_norm = max_norm
+
+    def __call__(self, params, ids, *args, **kwargs):
+        pass
+
+
+class NCELoss(object):
+
+    def __init__(self, num_true=1, sampled_values=None, remove_accidental_hits=False):
+        super(NCELoss, self).__init__()
+
+    def __call__(self, weights, biases, labels, inputs, num_sampled, num_classes):
+        pass
+
+
+class Not_equal(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, x, y):
+        pass
+
+
+class Count_nonzero(object):
+
+    def __init__(self, keepdims=None, dtype='int64'):
+        pass
+
+    def __call__(self, *args, **kwargs):
+        pass
+
+
+class Resize:
+
+    def __init__(self, scale, method, antialias=False, data_format='channels_last', ksize=None):
+        if method not in ['nearest', 'linear', 'bilinear']:
+            raise ValueError('Current resize does not support this method.')
+        if method == 'bilinear':
+            method = 'linear'
+        self.method = method
+        self.antialias = antialias
+        self.scale = scale
+        if data_format != 'channels_last':
+            raise Exception("UpSampling2d resize_images only supports channels_last")
+
+    def __call__(self, inputs):
+        output_size = (int(inputs.shape[1] * self.scale[0]), int(inputs.shape[2] * self.scale[1]))
+        outputs = D.vision.resize(inputs, sizes=output_size, mode=self.method, align_corners=self.antialias)
+        return outputs
+
+
+def resize(inputs, output_size, method, antialias):
+    if method not in ['nearest', 'linear', 'bilinear']:
+        raise ValueError('Current resize does not support this method.')
+    if method == 'bilinear':
+        method = 'linear'
+    return D.vision.resize(inputs, sizes=output_size, mode=method, align_corners=antialias)
+
+
+class ZeroPadding1D(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, padding):
+        raise NotImplementedError
+
+
+class ZeroPadding2D(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, padding):
+        raise NotImplementedError
+
+
+class ZeroPadding3D(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, padding):
+        raise NotImplementedError
+
+
+class Sign(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, x):
+        return D.math.sign(x)
diff --git a/tensorlayer/backend/ops/dragon_nn.py b/tensorlayer/backend/ops/dragon_nn.py
new file mode 100644
index 000000000..e6b5105ef
--- /dev/null
+++ b/tensorlayer/backend/ops/dragon_nn.py
@@ -0,0 +1,910 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+import dragon as D
+from dragon.core.ops import vision_ops
+from dragon.core.ops import activation_ops
+
+
+def padding_format(padding):
+    """
+    Checks that the padding format is supported.
+
+    Parameters
+    ----------
+    padding : str
+        Must be one of the following: "same", "SAME", "VALID", "valid"
+
+    Returns
+    -------
+    str "SAME" or "VALID"
+    """
+
+    if padding in ["SAME", "same"]:
+        padding = "SAME"
+    elif padding in ["VALID", "valid"]:
+        padding = "VALID"
+    elif padding is None:
+        padding = None
+    else:
+        raise Exception("Unsupported padding: " + str(padding))
+    return padding
+
+
+def preprocess_1d_format(data_format, padding):
+    """
+    Checks that the 1-D data format is supported.
+
+    Parameters
+    ----------
+    data_format : str
+        Must be one of the following: "channels_last", "NWC", "NCW", "channels_first"
+    padding : str
+        Must be one of the following: "same", "valid", "SAME", "VALID"
+
+    Returns
+    -------
+    str "NWC" or "NCW" and "SAME" or "VALID"
+    """
+
+    if data_format in ["channels_last", "NWC"]:
+        data_format = "NWC"
+    elif data_format in ["channels_first", "NCW"]:
+        data_format = "NCW"
+    elif data_format is None:
+        data_format = None
+    else:
+        raise Exception("Unsupported data format: " + str(data_format))
+    padding = padding_format(padding)
+    return data_format, padding
+
+
+def preprocess_2d_format(data_format, padding):
+    """
+    Checks that the 2-D data format is supported.
+
+    Parameters
+    ----------
+    data_format : str
+        Must be one of the following: "channels_last", "NHWC", "NCHW", "channels_first"
+    padding : str
+        Must be one of the following: "same", "valid", "SAME", "VALID"
+
+    Returns
+    -------
+    str "NHWC" or "NCHW" and "SAME" or "VALID"
+    """
+
+    if data_format in ["channels_last", "NHWC", "nhwc"]:
+        data_format = "NHWC"
+    elif data_format in ["channels_first", "NCHW", "nchw"]:
+        data_format = "NCHW"
+    elif data_format is None:
+        data_format = None
+    else:
+        raise Exception("Unsupported data format: " + str(data_format))
+    padding = padding_format(padding)
+    return data_format, padding
+
+
+def preprocess_3d_format(data_format, padding):
+    """
+    Checks that the 3-D data format is supported.
+
+    Parameters
+    ----------
+    data_format : str
+        Must be one of the following: "channels_last", "NDHWC", "NCDHW", "channels_first"
+    padding : str
+        Must be one of the following: "same", "valid", "SAME", "VALID"
+
+    Returns
+    -------
+    str "NDHWC" or "NCDHW" and "SAME" or "VALID"
+    """
+
+    if data_format in ['channels_last', 'NDHWC']:
+        data_format = 'NDHWC'
+    elif data_format in ['channels_first', 'NCDHW']:
+        data_format = 'NCDHW'
+    elif data_format is None:
+        data_format = None
+    else:
+        raise Exception("Unsupported data format: " + str(data_format))
+    padding = padding_format(padding)
+    return data_format, padding
+
+
+def nchw_to_nhwc(x):
+    """
+    Channels first to channels last
+
+    Parameters
+    ----------
+    x : tensor
+        channels first tensor data
+
+    Returns
+    -------
+    channels last tensor data
+    """
+
+    pass
+
+
+def nhwc_to_nchw(x):
+    """
+    Channels last to channels first
+
+    Parameters
+    ----------
+    x : tensor
+        channels last tensor data
+
+    Returns
+    -------
+    channels first tensor data
+    """
+
+    pass
+
+
+class ReLU(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, x):
+        return D.nn.relu(x)
+
+
+def relu(x):
+    """
+    Computes rectified linear: max(features, 0).
+
+    Parameters
+    ----------
+    x : tensor
+        Must be one of the following types: float32, float64, int32, uint8, int16,
+        int8, int64, bfloat16, uint16, half, uint32, uint64, qint8.
+
+    Returns
+    -------
+    A Tensor. Has the same type as features.
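+
+    Examples
+    --------
+    A minimal illustrative sketch (assumes the Dragon backend is active; `constant` is the helper
+    from the companion dragon_backend module):
+
+    >>> x = constant(value=-1.0, shape=[3], dtype='float32')
+    >>> y = relu(x)  # all negative entries are clamped to 0.0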
+ """ + return D.nn.relu(x) + + +class ReLU6(object): + + def __init__(self): + pass + + def __call__(self, x): + return D.nn.relu6(x) + + +def relu6(x): + """ + Computes Rectified Linear 6: min(max(features, 0), 6). + + Parameters + ---------- + x : tensor + Must be one of the following types: float32, float64, int32, uint8, int16, + int8, int64, bfloat16, uint16, half, uint32, uint64, qint8. + + Returns + ------- + A Tensor with the same type as features. + """ + return D.nn.relu6(x) + + +class LeakyReLU(object): + + def __init__(self, alpha=0.2): + self.alpha = alpha + + def __call__(self, x): + return D.nn.leaky_relu(x, alpha=self.alpha) + + +def leaky_relu(x): + """ + Compute the Leaky ReLU activation function. + + Parameters + ---------- + x : tensor + representing preactivation values. Must be one of the following types: + float16, float32, float64, int32, int64. + + Returns + ------- + The activation value. + """ + + return D.nn.leaky_relu(x) + + +class Softplus(object): + + def __init__(self): + pass + + def __call__(self, x): + raise NotImplementedError + + +def softplus(x): + """ + Computes softplus: log(exp(features) + 1). + + Parameters + ---------- + x : tensor + Must be one of the following types: half, bfloat16, float32, float64. + + Returns + ------- + A Tensor. Has the same type as features. + """ + + raise NotImplementedError + + +class Tanh(object): + + def __init__(self): + pass + + def __call__(self, x): + return activation_ops.tanh(x) + + +def tanh(x): + """ + Computes hyperbolic tangent of x element-wise. + + Parameters + ---------- + x : tensor + Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128. + + Returns + ------- + A Tensor. Has the same type as x. + """ + + return activation_ops.tanh(x) + + +class Sigmoid(object): + + def __init__(self): + pass + + def __call__(self, x): + return activation_ops.sigmoid(x) + + +def sigmoid(x): + """ + Computes sigmoid of x element-wise. + + Parameters + ---------- + x : tensor + A Tensor with type float16, float32, float64, complex64, or complex128. + + Returns + ------- + A Tensor with the same type as x. + """ + return activation_ops.sigmoid(x) + + +class Softmax(object): + + def __init__(self): + pass + + def __call__(self, x): + return D.nn.softmax(x) + + +def softmax(logits, axis=None): + """ + Computes softmax activations. + + Parameters + ---------- + logits : tensor + Must be one of the following types: half, float32, float64. + axis : int + The dimension softmax would be performed on. The default is -1 which indicates the last dimension. + + Returns + ------- + A Tensor. Has the same type and shape as logits. + """ + return D.nn.softmax(logits) + + +class Dropout(object): + + def __init__(self, keep, seed=1): + self.keep = 1 - keep + self.seed = seed + + def __call__(self, inputs): + return D.nn.dropout(inputs, prob=self.keep) + + +class BiasAdd(object): + """ + Adds bias to value. + + Parameters + ---------- + x : tensor + A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128. + bias : tensor + Must be the same type as value unless value is a quantized type, + in which case a different quantized type may be used. + Returns + ------- + A Tensor with the same type as value. + """ + + def __init__(self, data_format='NHWC'): + self.data_format = data_format + + def __call__(self, x, bias): + inputs = [x, bias] + return vision_ops.bias_add(inputs, data_format=self.data_format) + + +def bias_add(x, bias): + """ + Adds bias to value. 
+
+    Parameters
+    ----------
+    x : tensor
+        A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128.
+    bias : tensor
+        Must be the same type as value unless value is a quantized type,
+        in which case a different quantized type may be used.
+
+    Returns
+    -------
+        A Tensor with the same type as value.
+    """
+    inputs = [x, bias]
+    return vision_ops.bias_add(inputs, data_format='NHWC')
+
+
+class Conv1D(object):
+    pass
+    # raise NotImplementedError
+
+
+def conv1d(input, filters, stride, padding, data_format='NWC', dilations=None, name=None):
+    """
+    Computes a 1-D convolution given 3-D input and filter tensors.
+
+    Parameters
+    ----------
+    input : tensor
+        A 3D Tensor. Must be of type float16, float32, or float64.
+    filters : tensor
+        A 3D Tensor. Must have the same type as input.
+    stride : int or list
+        An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step.
+    padding : string
+        'SAME' or 'VALID'
+    data_format : string
+        An optional string from "NWC", "NCW". Defaults to "NWC", the data is stored in the order of
+        [batch, in_width, in_channels]. The "NCW" format stores data as [batch, in_channels, in_width].
+    dilations : int or list
+        An int or list of ints that has length 1 or 3 which defaults to 1.
+        The dilation factor for each dimension of input. If set to k > 1,
+        there will be k-1 skipped cells between each filter element on that dimension.
+        Dilations in the batch and depth dimensions must be 1.
+    name : string
+        A name for the operation (optional).
+
+    Returns
+    -------
+        A Tensor. Has the same type as input.
+    """
+
+    pass
+
+
+class Conv2D(object):
+
+    def __init__(self, strides, padding, data_format='NHWC', dilations=None, out_channel=None, k_size=None):
+        self.data_format, self.padding = preprocess_2d_format(data_format, padding)
+        self.ksize = k_size[0]
+        if self.data_format == 'NHWC':
+            self.dg_stride = strides[1]
+            self.dg_dilation = dilations[1]
+        elif self.data_format == 'NCHW':
+            self.dg_stride = strides[2]
+            self.dg_dilation = dilations[2]
+
+    def __call__(self, inputs, filters):
+        outputs = vision_ops.conv2d(
+            [inputs, filters],
+            kernel_shape=self.ksize,
+            strides=self.dg_stride,
+            padding=self.padding,
+            dilations=self.dg_dilation,
+            data_format=self.data_format,
+        )
+        return outputs
+
+
+def conv2d(input, filters, strides, padding, data_format='NCHW', dilations=None):
+    """
+    Computes a 2-D convolution given 4-D input and filters tensors.
+
+    Parameters
+    ----------
+    input : tensor
+        Must be one of the following types: half, bfloat16, float32, float64. A 4-D tensor.
+        The dimension order is interpreted according to the value of data_format, see below for details.
+    filters : tensor
+        Must have the same type as input. A 4-D tensor of shape [filter_height, filter_width, in_channels, out_channels]
+    strides : int or list
+        The stride of the sliding window for each dimension of input. If a single value is given it is replicated in the H and W dimension.
+        By default the N and C dimensions are set to 1. The dimension order is determined by the value of data_format, see below for details.
+    padding : string
+        "SAME" or "VALID"
+    data_format : string
+        "NHWC", "NCHW". Defaults to "NCHW".
+    dilations : list or ints
+        list of ints that has length 1, 2 or 4, defaults to 1. The dilation factor for each dimension of input.
+
+    Returns
+    -------
+        A Tensor.
Has the same type as input. + """ + raise NotImplementedError + + +class Conv3D(object): + pass + # raise NotImplementedError + + +def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None, name=None): + """ + Computes a 3-D convolution given 5-D input and filters tensors. + + Parameters + ---------- + input : tensor + Must be one of the following types: half, bfloat16, float32, float64. + Shape [batch, in_depth, in_height, in_width, in_channels]. + filters : tensor + Must have the same type as input. Shape [filter_depth, filter_height, filter_width, in_channels, out_channels]. + in_channels must match between input and filters. + strides : list of ints + A list of ints that has length >= 5. 1-D tensor of length 5. + The stride of the sliding window for each dimension of input. + Must have strides[0] = strides[4] = 1. + padding : string + A string from: "SAME", "VALID". The type of padding algorithm to use. + data_format : string + An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data. + With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. + Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. + dilations : list of ints + Defaults to [1, 1, 1, 1, 1]. 1-D tensor of length 5. The dilation factor for each dimension of input. + If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. + The dimension order is determined by the value of data_format, see above for details. + Dilations in the batch and depth dimensions must be 1. + name : string + A name for the operation (optional). + + Returns + ------- + A Tensor. Has the same type as input. + """ + + raise NotImplementedError + + +def lrn(inputs, depth_radius, bias, alpha, beta): + """ + Local Response Normalization. + + Parameters + ---------- + inputs : tensor + Must be one of the following types: half, bfloat16, float32. 4-D. + depth_radius : int + Defaults to 5. 0-D. Half-width of the 1-D normalization window. + bias : float + Defaults to 1. An offset (usually positive to avoid dividing by 0). + alpha : float + Defaults to 1. A scale factor, usually positive. + beta : float + Defaults to 0.5. An exponent. + + Returns + ------- + A Tensor. Has the same type as input. + """ + pass + + +def moments(x, axes, shift=None, keepdims=False): + """ + Calculates the mean and variance of x. + + Parameters + ---------- + x : tensor + A Tensor + axes : ints + Axes along which to compute mean and variance. + shift : int + Not used in the current implementation. + keepdims : bool + produce moments with the same dimensionality as the input. + + Returns + ------- + Two Tensor objects: mean and variance. + """ + + pass + + +class MaxPool(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + self.ksize = ksize + self.strides = strides + + def __call__(self, inputs): + return vision_ops.pool2d( + inputs, + kernel_shape=self.ksize, + strides=self.strides, + padding=self.padding, + mode='MAX', + global_pooling=False, + data_format=self.data_format, + ) + + +def max_pool(input, ksize, strides, padding, data_format=None): + """ + Performs the max pooling on the input. 
+ + Parameters + ---------- + input : tensor + Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] if data_format does not start + with "NC" (default), or [batch_size, num_channels] + input_spatial_shape if data_format starts with "NC". + Pooling happens over the spatial dimensions only. + ksize : int or list of ints + An int or list of ints that has length 1, N or N+2. + The size of the window for each dimension of the input tensor. + strides : int or list of ints + An int or list of ints that has length 1, N or N+2. + The stride of the sliding window for each dimension of the input tensor. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + + Returns + ------- + A Tensor of format specified by data_format. The max pooled output tensor. + """ + pass + + +class AvgPool(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + self.filter_size = ksize + self.strides = strides + + def __call__(self, inputs): + return vision_ops.pool2d( + inputs, + kernel_shape=self.filter_size, + strides=self.strides, + padding=self.padding, + mode='AVG', + global_pooling=False, + data_format=self.data_format, + ) + + +def avg_pool(input, ksize, strides, padding): + """ + Performs the avg pooling on the input. + + Parameters + ---------- + input : tensor + Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] + if data_format does not start with "NC" (default), or [batch_size, num_channels] + input_spatial_shape + if data_format starts with "NC". Pooling happens over the spatial dimensions only. + ksize : int or list of ints + An int or list of ints that has length 1, N or N+2. + The size of the window for each dimension of the input tensor. + strides : int or list of ints + An int or list of ints that has length 1, N or N+2. + The stride of the sliding window for each dimension of the input tensor. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + + Returns + ------- + A Tensor of format specified by data_format. The average pooled output tensor. + """ + pass + + +def max_pool3d(input, ksize, strides, padding, data_format=None, name=None): + """ + Performs the max pooling on the input. + + Parameters + ---------- + input : tensor + A 5-D Tensor of the format specified by data_format. + ksize : int or list of ints + An int or list of ints that has length 1, 3 or 5. + The size of the window for each dimension of the input tensor. + strides : int or list of ints + An int or list of ints that has length 1, 3 or 5. + The stride of the sliding window for each dimension of the input tensor. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + data_format : string + "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data. + With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. + Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. + name : string + A name for the operation (optional). + + Returns + ------- + A Tensor of format specified by data_format. The max pooled output tensor. 
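+
+    Examples
+    --------
+    >>> # intended call shape; note the dragon implementation below is still a
+    >>> # stub, so this is a sketch of the target API, not runnable code
+    >>> y = max_pool3d(x, ksize=(1, 2, 2, 2, 1), strides=(1, 2, 2, 2, 1), padding='VALID')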
+ """ + pass + + +def avg_pool3d(input, ksize, strides, padding, data_format=None, name=None): + """ + Performs the average pooling on the input. + + Parameters + ---------- + input : tensor + A 5-D Tensor of shape [batch, height, width, channels] and type float32, float64, qint8, quint8, or qint32. + ksize : int or list of ints + An int or list of ints that has length 1, 3 or 5. The size of the window for each dimension of the input tensor. + strides : int or list of ints + An int or list of ints that has length 1, 3 or 5. + The stride of the sliding window for each dimension of the input tensor. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + data_format : string + 'NDHWC' and 'NCDHW' are supported. + name : string + Optional name for the operation. + + Returns + ------- + A Tensor with the same type as value. The average pooled output tensor. + """ + pass + + +def pool(input, window_shape, pooling_type, strides=None, padding='VALID', data_format=None, dilations=None, name=None): + """ + Performs an N-D pooling operation. + + Parameters + ---------- + input : tensor + Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] + if data_format does not start with "NC" (default), or [batch_size, num_channels] + input_spatial_shape + if data_format starts with "NC". Pooling happens over the spatial dimensions only. + window_shape : int + Sequence of N ints >= 1. + pooling_type : string + Specifies pooling operation, must be "AVG" or "MAX". + strides : ints + Sequence of N ints >= 1. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1. + padding : string + The padding algorithm, must be "SAME" or "VALID". Defaults to "SAME". + See the "returns" section of tf.ops.convolution for details. + data_format : string + Specifies whether the channel dimension of the input and output is the last dimension (default, or if data_format does not start with "NC"), + or the second dimension (if data_format starts with "NC"). + For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". + For N=3, the valid values are "NDHWC" (default) and "NCDHW". + dilations : list of ints + Dilation rate. List of N ints >= 1. Defaults to [1]*N. If any value of dilation_rate is > 1, then all values of strides must be 1. + name : string + Optional. Name of the op. + + Returns + ------- + Tensor of rank N+2, of shape [batch_size] + output_spatial_shape + [num_channels] + """ + pass + + +class DepthwiseConv2d(object): + + def __init__(self, strides, padding, data_format=None, dilations=None, ksize=None, channel_multiplier=1): + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + self.stride = strides + self.dilations = dilations + + def __call__(self, input, filter): + raise NotImplementedError("Not implemented depthwiseconv2d") + + +def depthwise_conv2d(input, filter, strides, padding, data_format=None, dilations=None, name=None): + """ + Depthwise 2-D convolution. + + Parameters + ---------- + input : tensor + 4-D with shape according to data_format. + filter : tensor + 4-D with shape [filter_height, filter_width, in_channels, channel_multiplier]. + strides : list + 1-D of size 4. The stride of the sliding window for each dimension of input. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. 
+
+    data_format : string
+        The data format for input. Either "NHWC" (default) or "NCHW".
+    dilations : list
+        1-D of size 2. The dilation rate in which we sample input values across the height and width dimensions in atrous convolution.
+        If it is greater than 1, then all values of strides must be 1.
+    name : string
+        A name for this operation (optional).
+
+    Returns
+    -------
+        A 4-D Tensor with shape according to data_format.
+        E.g., for "NHWC" format, shape is [batch, out_height, out_width, in_channels * channel_multiplier].
+    """
+
+    pass
+
+
+def conv1d_transpose(
+    input, filters, output_shape, strides, padding='SAME', data_format='NWC', dilations=None, name=None
+):
+    """
+    The transpose of conv1d.
+
+    Parameters
+    ----------
+    input : tensor
+        A 3-D Tensor of type float and shape [batch, in_width, in_channels]
+        for NWC data format or [batch, in_channels, in_width] for NCW data format.
+    filters : tensor
+        A 3-D Tensor with the same type as value and shape [filter_width, output_channels, in_channels].
+        filter's in_channels dimension must match that of value.
+    output_shape : tensor
+        A 1-D Tensor, containing three elements, representing the output shape of the deconvolution op.
+    strides : int or list
+        An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step.
+    padding : string
+        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
+    data_format : string
+        'NWC' and 'NCW' are supported.
+    dilations : list
+        An int or list of ints that has length 1 or 3 which defaults to 1.
+        The dilation factor for each dimension of input. If set to k > 1,
+        there will be k-1 skipped cells between each filter element on that dimension.
+        Dilations in the batch and depth dimensions must be 1.
+    name : string
+        Optional name for the returned tensor.
+
+    Returns
+    -------
+        A Tensor with the same type as value.
+    """
+    pass
+
+
+def conv2d_transpose(
+    input, filters, output_shape, strides, padding='SAME', data_format='NHWC', dilations=None, name=None
+):
+    """
+    The transpose of conv2d.
+
+    Parameters
+    ----------
+    input : tensor
+        A 4-D Tensor of type float and shape [batch, height, width, in_channels]
+        for NHWC data format or [batch, in_channels, height, width] for NCHW data format.
+    filters : tensor
+        A 4-D Tensor with the same type as input and shape [height, width,
+        output_channels, in_channels]. filter's in_channels dimension must match that of input.
+    output_shape : tensor
+        A 1-D Tensor representing the output shape of the deconvolution op.
+    strides : list
+        An int or list of ints that has length 1, 2 or 4. The stride of the sliding window for each dimension of input.
+        If a single value is given it is replicated in the H and W dimension.
+        By default the N and C dimensions are set to 1.
+        The dimension order is determined by the value of data_format, see below for details.
+    padding : string
+        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
+    data_format : string
+        'NHWC' and 'NCHW' are supported.
+    dilations : list
+        An int or list of ints that has length 1, 2 or 4, defaults to 1.
+    name : string
+        Optional name for the returned tensor.
+
+    Returns
+    -------
+        A Tensor with the same type as input.
+    """
+    pass
+
+
+def conv3d_transpose(
+    input, filters, output_shape, strides, padding='SAME', data_format='NDHWC', dilations=None, name=None
+):
+    """
+    The transpose of conv3d.
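+    This operation is sometimes called "deconvolution", although it is really
+    the transpose (gradient) of conv3d rather than an actual deconvolution.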
+
+    Parameters
+    ----------
+    input : tensor
+        A 5-D Tensor of type float and shape [batch, in_depth, in_height, in_width, in_channels]
+        for NDHWC data format or [batch, in_channels, in_depth, in_height, in_width] for NCDHW data format.
+    filters : tensor
+        A 5-D Tensor with the same type as value and shape [depth, height, width, output_channels, in_channels].
+        filter's in_channels dimension must match that of value.
+    output_shape : tensor
+        A 1-D Tensor representing the output shape of the deconvolution op.
+    strides : int or list
+        An int or list of ints that has length 1, 3 or 5.
+    padding : string
+        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
+    data_format : string
+        'NDHWC' and 'NCDHW' are supported.
+    dilations : list of ints
+        An int or list of ints that has length 1, 3 or 5, defaults to 1.
+    name : string
+        Optional name for the returned tensor.
+
+    Returns
+    -------
+        A Tensor with the same type as value.
+    """
+
+    pass
+
+
+class BatchNorm(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, *args, **kwargs):
+        pass
diff --git a/tensorlayer/backend/ops/load_backend.py b/tensorlayer/backend/ops/load_backend.py
new file mode 100644
index 000000000..7f0e25332
--- /dev/null
+++ b/tensorlayer/backend/ops/load_backend.py
@@ -0,0 +1,73 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import json
+import os
+import sys
+
+BACKEND = 'tensorflow'
+# BACKEND = 'mindspore'
+# BACKEND = 'dragon'
+
+# Look for an existing tl_backend.json configuration file.
+tl_backend_dir = os.path.expanduser('~')
+if not os.access(tl_backend_dir, os.W_OK):
+    tl_backend_dir = '/tmp'
+tl_dir = os.path.join(tl_backend_dir, '.tl')
+
+config = {
+    'backend': BACKEND,
+}
+if not os.path.exists(tl_dir):
+    path = os.path.join(tl_dir, 'tl_backend.json')
+    os.makedirs(tl_dir)
+    with open(path, "w") as f:
+        json.dump(config, f)
+    BACKEND = config['backend']
+    sys.stderr.write("Created the backend configuration file: " + path + '\n')
+else:
+    path = os.path.join(tl_dir, 'tl_backend.json')
+    with open(path, 'r') as load_f:
+        load_dict = json.load(load_f)
+    if load_dict['backend'] != config['backend']:
+        BACKEND = config['backend']
+    else:
+        BACKEND = load_dict['backend']
+
+# Set backend based on the TL_BACKEND environment variable.
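+# For example, running `TL_BACKEND=mindspore python train.py` selects MindSpore
+# for a single run without editing ~/.tl/tl_backend.json (illustrative usage).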
+if 'TL_BACKEND' in os.environ: + backend = os.environ['TL_BACKEND'] + if backend: + BACKEND = backend + +# import backend functions +if BACKEND == 'tensorflow': + from .tensorflow_backend import * + from .tensorflow_nn import * + import tensorflow as tf + BACKEND_VERSION = tf.__version__ + sys.stderr.write('Using TensorFlow backend.\n') + +elif BACKEND == 'mindspore': + from .mindspore_backend import * + from .mindspore_nn import * + import mindspore as ms + BACKEND_VERSION = ms.__version__ + # set context + import mindspore.context as context + import os + os.environ['DEVICE_ID'] = '0' + context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU') + # enable_task_sink=True, enable_loop_sink=True) + # context.set_context(mode=context.GRAPH_MODE, backend_policy='ms', + # device_target='Ascend', enable_task_sink=True, enable_loop_sink=True) + sys.stderr.write('Using MindSpore backend.\n') + +elif BACKEND == 'dragon': + from .dragon_backend import * + from .dragon_nn import * + import dragon as dg + BACKEND_VERSION = dg.__version__ + sys.stderr.write('Using Dragon backend.\n') +else: + raise NotImplementedError("This backend is not supported") diff --git a/tensorlayer/backend/ops/mindspore_backend.py b/tensorlayer/backend/ops/mindspore_backend.py new file mode 100644 index 000000000..d54a4c70f --- /dev/null +++ b/tensorlayer/backend/ops/mindspore_backend.py @@ -0,0 +1,1131 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +from __future__ import absolute_import, division, print_function +from .mindspore_nn import nchw_to_nhwc, nhwc_to_nchw +from mindspore._c_expression.typing import Type +from mindspore.common import dtype as mstype + +from mindspore.common.parameter import Parameter +from mindspore.common.initializer import ( + initializer, Constant, Normal, TruncatedNormal, Initializer, _assignment, _calculate_in_and_out, One, Zero +) +from mindspore.common.tensor import Tensor +from mindspore._c_expression import Tensor as Tensor_ +from mindspore.ops import operations as P +from mindspore.ops import functional as F +import mindspore.context as context +from mindspore.nn import Cell + +import numpy as np +from scipy.stats import truncnorm +import random + +_dtypeDict = { + 'DType': Type, + 'float16': mstype.float16, + 'float32': mstype.float32, + 'float64': mstype.float64, + 'int8': mstype.int8, + 'int16': mstype.int16, + 'int32': mstype.int32, + 'int64': mstype.int64, + 'uint8': mstype.uint8, + 'uint16': mstype.uint16, + 'uint32': mstype.uint32, + 'uint64': mstype.uint64 +} + +DType = Type +float16 = mstype.float16 +float32 = mstype.float32 +float64 = mstype.float64 +int8 = mstype.int8 +int16 = mstype.int16 +int32 = mstype.int32 +int64 = mstype.int64 +uint8 = mstype.uint8 +uint16 = mstype.uint16 +uint32 = mstype.uint32 +uint64 = mstype.uint64 + +# isinstance input output +# TensorLike = Tensor_ + + +def set_context(**kwargs): + return context.set_context(**kwargs) + + +def get_tensor_shape(x): + return list(P.Shape()(x)) + + +# initializers +def zeros(shape, dtype=mstype.float32): + """ + Creates a tensor with all elements set to zero. + + Parameters + ---------- + shape : A list of integers + a tuple of integers, or a 1-D Tensor of type int32. + dtype : tensor + The DType of an element in the resulting Tensor + + Returns + ------- + A Tensor with all elements set to zero. 
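+
+    Examples
+    --------
+    >>> # minimal sketch; dtype defaults to float32
+    >>> z = zeros((2, 3))
+    >>> z8 = zeros((4,), dtype=int8)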
+
+    """
+    # shape = shape[::-1]
+    arr = np.ndarray(shape)
+    init_obj = Zero()
+    init_obj(arr)
+    return Tensor(arr, dtype=dtype)
+
+
+def ones(shape, dtype=mstype.float32):
+    """
+    Creates a tensor with all elements set to ones.
+
+    Parameters
+    ----------
+    shape : A list of integers
+        a tuple of integers, or a 1-D Tensor of type int32.
+    dtype : tensor
+        The DType of an element in the resulting Tensor
+
+    Returns
+    -------
+        A Tensor with all elements set to one.
+
+    """
+    # shape = shape[::-1]
+    arr = np.ndarray(shape)
+    init_obj = One()
+    init_obj(arr)
+    return Tensor(arr, dtype=dtype)
+
+
+def constant(value, dtype=mstype.float32, shape=None):
+    """
+    Creates a constant tensor from a tensor-like object.
+
+    Parameters
+    ----------
+    value : list
+        A constant value (or list) of output type dtype.
+    dtype : tensor
+        The type of the elements of the resulting tensor.
+    shape : tuple
+        Optional dimensions of resulting tensor.
+
+    Returns
+    -------
+        A Constant Tensor.
+
+    """
+    # shape = shape[::-1]
+    arr = np.ndarray(shape)
+    Constant(value)(arr=arr)
+    return Tensor(arr, dtype=dtype)
+
+
+class Uniform(Initializer):
+    """
+    Initialize a uniform array, and obtain values U(minval, maxval) from the uniform distribution
+    to fill the input tensor.
+
+    Args:
+        minval : int
+            The lower bound on the range of random values to generate (inclusive). Defaults to 0.
+        maxval : int
+            The upper bound on the range of random values to generate (exclusive). Defaults to 1 if dtype is floating point.
+        seed : int
+            A Python integer. Used to create a reproducible sequence of values across multiple calls.
+
+    Returns:
+        Array, uniform array.
+    """
+
+    def __init__(self, minval=0, maxval=None, seed=None):
+        super(Uniform, self).__init__(minval=minval, maxval=maxval, seed=seed)
+        self.minval = minval
+        self.maxval = maxval
+        self.seed = seed
+
+    def _initialize(self, arr):
+        np.random.seed(self.seed)
+        tmp = np.random.uniform(self.minval, self.maxval, arr.shape)
+        _assignment(arr, tmp)
+
+
+def random_uniform(shape, minval=0, maxval=None, dtype=mstype.float32, seed=None):
+    """
+    Outputs random values from a uniform distribution.
+
+    Parameters
+    ----------
+    shape : tuple
+        A 1-D integer Tensor or Python array. The shape of the output tensor.
+    minval : int
+        The lower bound on the range of random values to generate (inclusive). Defaults to 0.
+    maxval : int
+        The upper bound on the range of random values to generate (exclusive). Defaults to 1 if dtype is floating point.
+    dtype : tensor
+        The type of the output: float16, float32, float64, int32, or int64.
+    seed : int
+        A Python integer. Used to create a reproducible sequence of values across multiple calls.
+
+    Returns
+    -------
+        A tensor of the specified shape filled with random uniform values.
+
+    """
+    # shape = shape[::-1]
+    arr = np.ndarray(shape)
+    init_obj = Uniform(minval=minval, maxval=maxval, seed=seed)
+    init_obj(arr)
+    return Tensor(arr, dtype=dtype)
+
+
+class Normal(Initializer):
+    """
+    Initialize a normal array, and obtain values N(mean, stddev) from the normal distribution
+    to fill the input tensor.
+
+    Parameters
+    ----------
+    mean : float
+        The mean of the normal distribution
+    stddev : float
+        The standard deviation of the normal distribution.
+    seed : A Python integer
+        Used to create a random seed for the distribution
+
+    Returns:
+        Array, normal array.
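+
+    Examples
+    --------
+    >>> # sketch of the in-place initializer protocol used throughout this file
+    >>> arr = np.ndarray((3, 3))
+    >>> Normal(mean=0.0, stddev=0.1, seed=42)(arr)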
+    """
+
+    def __init__(self, mean=0.0, stddev=0.01, seed=None):
+        super(Normal, self).__init__(mean=mean, stddev=stddev, seed=seed)
+        self.mean = mean
+        self.stddev = stddev
+        self.seed = seed
+
+    def _initialize(self, arr):
+        np.random.seed(self.seed)
+        tmp = np.random.normal(self.mean, self.stddev, arr.shape)
+        _assignment(arr, tmp)
+
+
+def random_normal(shape, mean=0.0, stddev=1.0, dtype=mstype.float32, seed=None):
+    """
+    Outputs random values from a normal distribution.
+
+    Parameters
+    ----------
+    shape : tuple
+        A 1-D integer Tensor or Python array. The shape of the output tensor.
+    mean : float
+        The mean of the normal distribution
+    stddev : float
+        The standard deviation of the normal distribution.
+    dtype : tensor
+        The type of the output.
+    seed : A Python integer
+        Used to create a random seed for the distribution
+
+    Returns
+    -------
+        A tensor of the specified shape filled with random normal values.
+
+    """
+    # shape = shape[::-1]
+    arr = np.ndarray(shape)
+    init_obj = Normal(mean=mean, stddev=stddev, seed=seed)
+    init_obj(arr)
+    return Tensor(arr, dtype=dtype)
+
+
+class TruncatedNormal(Initializer):
+    """
+    Initialize a truncated normal distribution which is a bounded normal distribution within N(low, high).
+
+    Args:
+        mean (float): The mean of the distribution. Default: 0.0.
+        stddev (float): The standard deviation of the distribution. Default: 0.01.
+        seed (int): Seed for the random draw. Default: None.
+
+    Returns:
+        Array, truncated normal array.
+    """
+
+    def __init__(self, mean=0.0, stddev=0.01, seed=None):
+        super(TruncatedNormal, self).__init__(mean=mean, stddev=stddev, seed=seed)
+        self.mean = mean
+        self.stddev = stddev
+        self.seed = seed
+
+    def _initialize(self, arr):
+        tmp = truncnorm.rvs(-2, 2, loc=self.mean, scale=self.stddev, size=arr.shape, random_state=self.seed)
+        _assignment(arr, tmp)
+
+
+def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=mstype.float32, seed=None):
+    """
+    Outputs random values from a truncated normal distribution.
+
+    Parameters
+    ----------
+    shape : tuple
+        A 1-D integer Tensor or Python array. The shape of the output tensor.
+    mean : float
+        The mean of the normal distribution
+    stddev : float
+        The standard deviation of the normal distribution.
+    dtype : tensor
+        The type of the output.
+    seed : A Python integer
+        Used to create a random seed for the distribution
+
+    Returns
+    -------
+        A tensor of the specified shape filled with random truncated normal values.
+
+    """
+    # shape = shape[::-1]
+    arr = np.ndarray(shape)
+    init_obj = TruncatedNormal(mean=mean, stddev=stddev, seed=seed)
+    init_obj(arr)
+    return Tensor(arr, dtype=dtype)
+
+
+class HeNormal(Initializer):
+    r"""
+    he_normal: It draws samples from a truncated normal distribution centered on 0 with
+    stddev = sqrt(2 / fan_in) where fan_in is the number of input units in the weight tensor.
+
+    Args:
+        arr (Array): The array to be assigned.
+
+    Returns:
+        Array, assigned array.
+    """
+
+    def __init__(self, seed=None):
+        super(HeNormal, self).__init__(seed=seed)
+        self.seed = seed
+
+    def _initialize(self, arr):
+        n_in, _ = _calculate_in_and_out(arr)
+        boundary = np.sqrt(2.0 / n_in)
+        np.random.seed(self.seed)
+        data = np.random.normal(0, boundary, arr.shape)
+        _assignment(arr, data)
+
+
+def he_normal(shape, dtype, seed=None):
+    """
+    He normal initializer.
+
+    Parameters
+    ----------
+    shape : tuple
+        A 1-D integer Tensor or Python array. The shape of the output tensor.
+    dtype : tensor
+        The type of the output.
+    seed : A Python integer.
+        Used to seed the random generator.
+
+    Returns
+    -------
+        A tensor of the specified shape filled with he normal values.
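+
+    Examples
+    --------
+    >>> # minimal sketch using the module-level float32 alias defined above
+    >>> w = he_normal((64, 32), dtype=float32, seed=1)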
+ """ + # shape = shape[::-1] + arr = np.ndarray(shape) + init_obj = HeNormal(seed) + init_obj(arr) + return Tensor(arr, dtype=dtype) + + +def Variable(initial_value, name, trainable=True): + """ + Creates a new variable with value initial_value. + + Parameters + ---------- + initial_value : tensor + A Tensor, or Python object convertible to a Tensor + name : str + Optional name for the variable. Defaults to 'Variable' and gets uniquified automatically. + Returns + ------- + Variable + """ + + var = Parameter(initial_value, name=name, requires_grad=trainable) + return var + + +class MatMul(Cell): + + def __init__(self): + super(MatMul, self).__init__() + self.matmul = P.MatMul() + + def construct(self, a, b): + return self.matmul(a, b) + + +def matmul(a, b): + """ + Multiplies matrix a by matrix b, producing a * b. + + Parameters + ---------- + a : tensor + type float16, float32, float64, int32, complex64, complex128 and rank > 1. + b : tensor + with same type and rank as a. + + Returns + ------- + A Tensor of the same type as a and b + """ + matmul_obj = P.MatMul() + outputs = matmul_obj(a, b) + return outputs + + +def add(value, bias): + """ + Returns x + y element-wise. + + Parameters + ---------- + value : tensor. + Must be one of the following types: bfloat16, half, float32, float64, + uint8, int8, int16, int32, int64, complex64, complex128, string. + bias : tensor + Must have the same type as a + name : str + A name for the operation + + Returns + ------- + A Tensor. Has the same type as a. + """ + + add_obj = P.TensorAdd() + outputs = add_obj(value, bias) + return outputs + + +def dtypes(dt): + """ + Data dtypes. + + Parameters + ---------- + dt : string + It could be 'uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16', + 'int32', 'int64', 'float16', 'float32', 'float64', 'DType'. + + Returns + ------- + Data dtypes + """ + + if dt not in _dtypeDict.keys(): + raise Exception("Unsupported dtype: {}".format(dt)) + return _dtypeDict[dt] + + +def minimum(x, y): + """ + Returns the min of x and y (i.e. x < y ? x : y) element-wise. + + Parameters + ---------- + x : tensor. + Must be one of the following types: bfloat16, half, float32, float64, int32, int64. + y : A Tensor. + Must have the same type as x. + name : str + A name for the operation (optional). + + Returns + ------- + A Tensor. Has the same type as x + """ + minimum_obj = P.Minimum() + outputs = minimum_obj(x, y) + return outputs + + +class FlattenReshape(Cell): + + def __init__(self): + super(FlattenReshape, self).__init__() + self.shape = P.Shape() + self.reshape = P.Reshape() + + def construct(self, inputs): + dim = 1 + for d in self.shape(inputs)[1:]: + dim *= d + return self.reshape(inputs, (-1, dim)) + + +class Reshape(Cell): + + def __init__(self, shape): + super(Reshape, self).__init__() + self.reshape = P.Reshape() + self.shape = tuple(shape) + + def construct(self, tensor): + return self.reshape(tensor, self.shape) + + +def reshape(tensor, shape): + """ + Reshapes a tensor. + + Parameters + ---------- + tensor : tensor + A Tensor. + shape : tensor + Defines the shape of the output tensor. + Returns + ------- + A Tensor. Has the same type as tensor + """ + reshape_obj = P.Reshape() + outputs = reshape_obj(tensor, tuple(shape)) + return outputs + + +class Concat(Cell): + + def __init__(self, axis): + super(Concat, self).__init__() + self.concat = P.Concat(axis) + + def construct(self, values): + return self.concat(values) + + +def concat(values, axis): + """ + Concatenates tensors along one dimension. 
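+    For example, concatenating two tensors of shape (2, 3) along axis 0 yields a
+    (4, 3) tensor, while concatenating them along axis 1 yields (2, 6).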
+ + Parameters + ---------- + values : list + A list of Tensor objects or a single Tensor + axis : int + 0-D int32 Tensor. Dimension along which to concatenate + Returns + ------- + A Tensor resulting from concatenation of the input tensors. + """ + # TODO testing axis + concat_obj = P.Concat(axis) + outputs = concat_obj(values) + return outputs + + +def convert_to_tensor(value, dtype=None): + """ + Converts the given value to a Tensor. + + Parameters + ---------- + value : object + An object whose type has a registered Tensor conversion function. + dtype : optional + Optional element type for the returned tensor. If missing, the type is inferred from the type of value. + + Returns + ------- + A Tensor based on value. + """ + #todo testing value + return Tensor(value, dtype=dtype) + + +def sqrt(x): + """ + Computes square root of x element-wise. + + Parameters + ---------- + x : tensor + Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128. + + Returns + ------- + A Tensor. Has the same type as x. + """ + sqrt_obj = P.Sqrt() + outputs = sqrt_obj(x) + return outputs + + +class ReduceSum(Cell): + + def __init__(self, axis): + super(ReduceSum, self).__init__() + self.axis = axis + self.reduce_sum = P.ReduceSum(keep_dims=True) + + def construct(self, input): + return self.reduce_sum(input, self.axis) + + +class ReduceMean(Cell): + + def __init__(self, axis): + super(ReduceMean, self).__init__() + self.axis = axis + self.reducemean = P.ReduceMean(keep_dims=False) + + def construct(self, inputs): + output = self.reducemean(inputs, self.axis) + return output + + +def reduce_mean(input_tensor, axis=None): + """ + Computes the mean of elements across dimensions of a tensor. + + Parameters + ---------- + input_tensor : tensor + The tensor to reduce. Should have numeric type. + axis : int + The dimensions to reduce. If None (the default), reduces all dimensions. + Must be in the range [-rank(input_tensor), rank(input_tensor)). + name : str + A name for the operation (optional). + + Returns + ------- + The reduced tensor. + """ + + Rmean_obj = P.ReduceMean(keep_dims=False) + outputs = Rmean_obj(input_tensor, axis) + return outputs + + +class ReduceMax(Cell): + + def __init__(self, axis): + super(ReduceMax, self).__init__() + self.axis = axis + self.reducemax = P.ReduceMax(keep_dims=False) + + def construct(self, inputs): + output = self.reducemax(inputs, self.axis) + return output + + +def reduce_max(input_tensor, axis=None): + """ + Computes the maximum of elements across dimensions of a tensor. + + Parameters + ---------- + input_tensor : tensor + The tensor to reduce. Should have real numeric type. + axis : int + The dimensions to reduce. If None (the default), reduces all dimensions. + Must be in the range [-rank(input_tensor), rank(input_tensor)). + name : str + A name for the operation (optional). + + Returns + ------- + The reduced tensor. + """ + + Rmax_obj = P.ReduceMax(keep_dims=False) + outputs = Rmax_obj(input_tensor, axis) + return outputs + + +def reduce_min(input_tensor, axis=None): + """ + Computes the minimum of elements across dimensions of a tensor. + + Parameters + ---------- + input_tensor : tensor + The tensor to reduce. Should have real numeric type. + axis : int + The dimensions to reduce. If None (the default), reduces all dimensions. + Must be in the range [-rank(input_tensor), rank(input_tensor)). + name : str + A name for the operation (optional). + + Returns + ------- + The reduced tensor. 
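+
+    Examples
+    --------
+    >>> # sketch: for a (2, 3) tensor, axis=1 yields a (2,) tensor of row minima
+    >>> m = reduce_min(x, axis=1)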
+    """
+
+    Rmin_obj = P.ReduceMin(keep_dims=False)
+    outputs = Rmin_obj(input_tensor, axis)
+    return outputs
+
+
+def pad(tensor, paddings, mode='CONSTANT', constant_values=0):
+    """
+    Pads a tensor.
+
+    Parameters
+    ----------
+    tensor : tensor
+        A Tensor.
+    paddings : tuple
+        A tuple of type int32.
+    mode : str
+        One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
+    constant_values : int
+        In "CONSTANT" mode, the scalar pad value to use. Must be same type as tensor.
+
+    Returns
+    -------
+        A Tensor. Has the same type as tensor.
+    """
+    # TODO: support constant_values and the different padding modes
+    pass
+
+
+class Unstack(Cell):
+
+    def __init__(self, axis, num=None):
+        super(Unstack, self).__init__()
+        self.axis = axis
+        self.num = num
+
+    def __call__(self, values):
+        raise NotImplementedError
+
+
+class Stack(Cell):
+
+    def __init__(self, axis):
+        super(Stack, self).__init__()
+        self.axis = axis
+
+    def __call__(self, values):
+        raise NotImplementedError
+
+
+def stack(values, axis=0):
+    """
+    Stacks a list of rank-R tensors into one rank-(R+1) tensor.
+
+    Parameters
+    ----------
+    values : list
+        A list of Tensor objects with the same shape and type.
+    axis : int
+        An int. The axis to stack along. Defaults to the first dimension.
+        Negative values wrap around, so the valid range is [-(R+1), R+1).
+
+    Returns
+    -------
+        A stacked Tensor with the same type as values.
+    """
+    raise NotImplementedError
+
+
+def meshgrid(x, y):
+    """
+    Broadcasts parameters for evaluation on an N-D grid.
+
+    Parameters
+    ----------
+    x : tensor
+        Tensors with rank 1.
+    y : tensor
+        Tensors with rank 1.
+
+    Returns
+    -------
+        A list of N Tensors with rank N.
+    """
+
+    pass
+
+
+def range(start, limit=None, delta=1, dtype=None):
+    """
+    Creates a sequence of numbers.
+
+    Parameters
+    ----------
+    start : tensor
+        A 0-D Tensor (scalar). Acts as first entry in the range if limit is not None;
+        otherwise, acts as range limit and first entry defaults to 0.
+    limit : tensor
+        A 0-D Tensor (scalar). Upper limit of sequence, exclusive. If None,
+        defaults to the value of start while the first entry of the range defaults to 0.
+    delta : tensor
+        A 0-D Tensor (scalar). Number that increments start. Defaults to 1.
+    dtype : type
+        The type of the elements of the resulting tensor.
+
+    Returns
+    -------
+        A 1-D Tensor of type dtype.
+    """
+
+    pass
+
+
+class ExpandDims(Cell):
+
+    def __init__(self, axis):
+        super(ExpandDims, self).__init__()
+        self.axis = axis
+        self.expand_dims = P.ExpandDims()
+
+    def construct(self, input):
+        output = self.expand_dims(input, self.axis)
+        return output
+
+
+def expand_dims(input, axis):
+    """
+    Inserts a dimension of 1 into a tensor's shape.
+
+    Parameters
+    ----------
+    input : tensor
+        A Tensor.
+    axis : int
+        0-D (scalar). Specifies the dimension index at which to expand the shape of input.
+        Must be in the range [-rank(input) - 1, rank(input)].
+
+    Returns
+    -------
+        A Tensor with the same data as input, but its shape has an additional dimension of size 1 added.
+    """
+
+    expand_obj = P.ExpandDims()
+    outputs = expand_obj(input, axis)
+    return outputs
+
+
+class Tile(Cell):
+
+    def __init__(self):
+        super(Tile, self).__init__()
+        self.tile = P.Tile()
+
+    def construct(self, input, multiples):
+        return self.tile(input, tuple(multiples))
+
+
+def tile(input, multiples):
+    """
+    Constructs a tensor by tiling a given tensor.
+
+    Parameters
+    ----------
+    input : tensor
+        A Tensor. 1-D or higher.
+    multiples : tensor
+        Must be one of the following types: int32, int64. 1-D.
Length must be the same as the number of dimensions in input
+
+    Returns
+    -------
+        A Tensor. Has the same type as input.
+    """
+    tile_obj = P.Tile()
+    outputs = tile_obj(input, tuple(multiples))
+    return outputs
+
+
+class Cast(Cell):
+
+    def __init__(self, dtype):
+        super(Cast, self).__init__()
+        self.dtype = dtype
+        self.cast = P.Cast()
+
+    def construct(self, input):
+        return self.cast(input, dtype=self.dtype)
+
+
+def cast(x, dtype):
+    """
+    Casts a tensor to a new type.
+
+    Parameters
+    ----------
+    x : tensor
+        A Tensor or SparseTensor or IndexedSlices of numeric type.
+        It could be uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64.
+    dtype : dtype
+        The destination type. The list of supported dtypes is the same as x
+
+    Returns
+    -------
+        A Tensor or SparseTensor or IndexedSlices with same shape as x and same type as dtype.
+    """
+    cast_obj = P.Cast()
+    outputs = cast_obj(x, dtype)
+    return outputs
+
+
+class Transpose(Cell):
+
+    def __init__(self, perm, conjugate=False):
+        super(Transpose, self).__init__()
+        self.perm = tuple(perm)
+        self.conjugate = conjugate
+        self.transpose = P.Transpose()
+        if self.conjugate:
+            raise NotImplementedError("conjugate not implemented")
+
+    def construct(self, a):
+        return self.transpose(a, self.perm)
+
+
+def transpose(a, perm=None, conjugate=False):
+    """
+    Transposes a.
+
+    Parameters
+    ----------
+    a : tensor
+        A Tensor.
+    perm : int
+        A permutation of the dimensions of a.
+    conjugate : bool
+        Setting it to True is mathematically equivalent to ms.math.conj(ms.transpose(input)).
+
+    Returns
+    -------
+        A transposed Tensor.
+    """
+    # TODO: support perm=None and conjugate=True
+    trans_obj = P.Transpose()
+    outputs = trans_obj(a, perm)
+    return outputs
+
+
+def gather_nd(params, indices, batch_dims=0):
+    """
+    Gather slices from params into a Tensor with shape specified by indices.
+
+    Parameters
+    ----------
+    params : tensor
+        The tensor from which to gather values.
+    indices : tensor
+        Must be one of the following types: int32, int64. Index tensor.
+    batch_dims : int
+        An integer or a scalar 'Tensor'. The number of batch dimensions.
+
+    Returns
+    -------
+        A Tensor. Has the same type as params.
+    """
+
+    pass
+
+
+def clip_by_value(t, clip_value_min, clip_value_max):
+    """
+    Clips tensor values to a specified min and max.
+
+    Parameters
+    ----------
+    t : tensor
+        A Tensor or IndexedSlices
+    clip_value_min : tensor
+        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The minimum value to clip by
+    clip_value_max : tensor
+        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The maximum value to clip by
+
+    Returns
+    -------
+        A clipped Tensor or IndexedSlices.
+    """
+
+    pass
+
+
+def split(value, num_or_size_splits, axis=0, num=None):
+    """
+    Splits a tensor into sub tensors.
+
+    Parameters
+    ----------
+    value : tensor
+        The Tensor to split.
+    num_or_size_splits : list
+        Either an integer indicating the number of splits along split_dim or a 1-D integer Tensor or
+        Python list containing the sizes of each output tensor along split_dim.
+    axis : int
+        The dimension along which to split. Must be in the range [-rank(value), rank(value)). Defaults to 0.
+    num : int
+        used to specify the number of outputs when it cannot be inferred from the shape of size_splits.
+
+    Returns
+    -------
+        Tensor objects resulting from splitting value.
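+
+    Examples
+    --------
+    >>> # intended call shape; the MindSpore implementation below is still a
+    >>> # stub, so this is a sketch of the target API, not runnable code
+    >>> parts = split(x, 3, axis=0)  # a (6, 4) tensor -> three (2, 4) tensors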
+    """
+    pass
+
+
+def floor(x):
+    raise NotImplementedError
+
+
+def gather(params, indices):
+    raise NotImplementedError
+
+
+def linspace(start, stop, num):
+    raise NotImplementedError
+
+
+def slice(inputs, starts, sizes):
+    raise NotImplementedError
+
+
+def add_n(inputs):
+    raise NotImplementedError
+
+
+class OneHot(Cell):
+
+    def __init__(self, axis=-1, depth=1, on_value=1.0, off_value=0.0, dtype=mstype.float32):
+        super(OneHot, self).__init__()
+        self.onehot = P.OneHot(axis)
+        self.depth = depth
+        self.dtype = dtype
+        self.on_value = F.cast(on_value, self.dtype)
+        self.off_value = F.cast(off_value, self.dtype)
+
+    def construct(self, indices):
+        return self.onehot(indices, self.depth, self.on_value, self.off_value)
+
+
+class L2Normalize(Cell):
+
+    def __init__(self, axis=None, epsilon=1e-12):
+        super(L2Normalize, self).__init__()
+        pass
+
+    def __call__(self, input, *args, **kwargs):
+        pass
+
+
+class EmbeddingLookup(Cell):
+
+    def __init__(self, max_norm=None):
+        super(EmbeddingLookup, self).__init__()
+        self.max_norm = max_norm
+
+    def __call__(self, params, ids, *args, **kwargs):
+        pass
+
+
+class NCELoss(object):
+
+    def __init__(self, num_true=1, sampled_values=None, remove_accidental_hits=False):
+        super(NCELoss, self).__init__()
+
+    def __call__(self, weights, biases, labels, inputs, num_sampled, num_classes):
+        pass
+
+
+class Not_equal(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, x, y):
+        pass
+
+
+class Count_nonzero(object):
+
+    def __init__(self, keepdims=None, dtype=int64):
+        pass
+
+    def __call__(self, *args, **kwargs):
+        pass
+
+
+class Resize(Cell):
+
+    def __init__(self, scale, method, antialias=False, data_format='channels_last', ksize=None):
+        super(Resize, self).__init__()
+        self.data_format = data_format
+        if method not in ['nearest', 'bilinear']:
+            raise ValueError('The method must be "nearest" or "bilinear".')
+        self.method = method
+        if self.method == 'nearest':
+            self.resize = P.ResizeNearestNeighbor(size=tuple(scale), align_corners=antialias)
+        elif self.method == 'bilinear':
+            if ksize is None:
+                raise ValueError('The bilinear method requires ksize, whose dimension must be 2 (H, W).')
+            out_size = (int(ksize[0] * scale[0]), int(ksize[1] * scale[1]))
+            self.resize = P.ResizeBilinear(size=out_size)
+
+    def construct(self, inputs):
+        if self.data_format == 'channels_last':
+            inputs = nhwc_to_nchw(inputs)
+        outputs = self.resize(inputs)
+        if self.data_format == 'channels_last':
+            outputs = nchw_to_nhwc(outputs)
+        return outputs
+
+
+def resize(inputs, output_size, method, antialias):
+    raise NotImplementedError
+
+
+class ZeroPadding1D(Cell):
+
+    def __init__(self):
+        super(ZeroPadding1D, self).__init__()
+
+    def construct(self, *inputs, **kwargs):
+        raise NotImplementedError
+
+
+class ZeroPadding2D(Cell):
+
+    def __init__(self):
+        super(ZeroPadding2D, self).__init__()
+
+    def construct(self, *inputs, **kwargs):
+        raise NotImplementedError
+
+
+class ZeroPadding3D(Cell):
+
+    def __init__(self):
+        super(ZeroPadding3D, self).__init__()
+
+    def construct(self, *inputs, **kwargs):
+        raise NotImplementedError
+
+
+class Sign(Cell):
+
+    def __init__(self):
+        super(Sign, self).__init__()
+        self.sign = P.Sign()
+
+    def construct(self, x):
+        return self.sign(x)
diff --git a/tensorlayer/backend/ops/mindspore_nn.py b/tensorlayer/backend/ops/mindspore_nn.py
new file mode 100644
index 000000000..73e259f08
--- /dev/null
+++ b/tensorlayer/backend/ops/mindspore_nn.py
@@ -0,0 +1,1187 @@
+#!
/usr/bin/python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, division, print_function
+
+from mindspore.nn.cell import Cell
+from mindspore import context
+import mindspore as ms
+from mindspore.ops import operations as P
+from mindspore.ops import functional as F
+from mindspore.communication.management import get_group_size, get_rank
+from mindspore.communication import management
+from mindspore._checkparam import check_int_positive
+from mindspore._extends import cell_attr_register
+
+
+def padding_format(padding):
+    """
+    Checks that the padding format corresponds to a supported format.
+
+    Parameters
+    ----------
+    padding : str
+        Must be one of the following: "same", "SAME", "VALID", "valid"
+
+    Returns
+    -------
+        str "same" or "valid"
+    """
+
+    if padding in ["SAME", "same"]:
+        padding = "same"
+    elif padding in ["VALID", "valid"]:
+        padding = "valid"
+    elif padding is None:
+        padding = None
+    else:
+        raise Exception("Unsupported padding: " + str(padding))
+    return padding
+
+
+def preprocess_1d_format(data_format, padding):
+    """
+    Checks that the 1-D data format and padding correspond to a supported format.
+
+    Parameters
+    ----------
+    data_format : str
+        Must be one of the following: "channels_last", "NWC", "NCW", "channels_first"
+    padding : str
+        Must be one of the following: "same", "valid", "SAME", "VALID"
+
+    Returns
+    -------
+        str "NWC" or "NCW" and "same" or "valid"
+    """
+
+    if data_format in ["channels_last", "NWC"]:
+        data_format = "NWC"
+    elif data_format in ["channels_first", "NCW"]:
+        data_format = "NCW"
+    elif data_format is None:
+        data_format = None
+    else:
+        raise Exception("Unsupported data format: " + str(data_format))
+    padding = padding_format(padding)
+    return data_format, padding
+
+
+def preprocess_2d_format(data_format, padding):
+    """
+    Checks that the 2-D data format and padding correspond to a supported format.
+
+    Parameters
+    ----------
+    data_format : str
+        Must be one of the following: "channels_last", "NHWC", "NCHW", "channels_first"
+    padding : str
+        Must be one of the following: "same", "valid", "SAME", "VALID"
+
+    Returns
+    -------
+        str "NHWC" or "NCHW" and "same" or "valid"
+    """
+
+    if data_format in ["channels_last", "NHWC", "nhwc"]:
+        data_format = "NHWC"
+    elif data_format in ["channels_first", "NCHW", "nchw"]:
+        data_format = "NCHW"
+    elif data_format is None:
+        data_format = None
+    else:
+        raise Exception("Unsupported data format: " + str(data_format))
+    padding = padding_format(padding)
+    return data_format, padding
+
+
+def preprocess_3d_format(data_format, padding):
+    """
+    Checks that the 3-D data format and padding correspond to a supported format.
+
+    Parameters
+    ----------
+    data_format : str
+        Must be one of the following: "channels_last", "NDHWC", "NCDHW", "channels_first"
+    padding : str
+        Must be one of the following: "same", "valid", "SAME", "VALID"
+
+    Returns
+    -------
+        str "NDHWC" or "NCDHW" and "same" or "valid"
+    """
+
+    if data_format in ['channels_last', 'NDHWC']:
+        data_format = 'NDHWC'
+    elif data_format in ['channels_first', 'NCDHW']:
+        data_format = 'NCDHW'
+    elif data_format is None:
+        data_format = None
+    else:
+        raise Exception("Unsupported data format: " + str(data_format))
+    padding = padding_format(padding)
+    return data_format, padding
+
+
+def nchw_to_nhwc(x):
+    """
+    Channels first to channels last
+
+    Parameters
+    ----------
+    x : tensor
+        channels first tensor data
+
+    Returns
+    -------
+        channels last tensor data
+    """
+
+    if len(P.Shape()(x)) == 3:
+        x = P.Transpose()(x, (0, 2, 1))
+    elif len(P.Shape()(x)) == 4:
+        x = P.Transpose()(x, (0, 2, 3, 1))
+    elif len(P.Shape()(x)) == 5:
+        x = P.Transpose()(x, (0, 2, 3, 4, 1))
+    # else:
+    #     raise Exception("Unsupported dimensions")
+    return x
+
+
+def nhwc_to_nchw(x):
+    """
+    Channels last to channels first
+
+    Parameters
+    ----------
+    x : tensor
+        channels last tensor data
+
+    Returns
+    -------
+        channels first tensor data
+    """
+
+    if len(P.Shape()(x)) == 3:
+        x = P.Transpose()(x, (0, 2, 1))
+    elif len(P.Shape()(x)) == 4:
+        x = P.Transpose()(x, (0, 3, 1, 2))
+    elif len(P.Shape()(x)) == 5:
+        x = P.Transpose()(x, (0, 4, 1, 2, 3))
+    # else:
+    #     raise Exception("Unsupported dimensions")
+    return x
+
+
+class ReLU(Cell):
+
+    def __init__(self):
+        super(ReLU, self).__init__()
+        self.relu = P.ReLU()
+
+    def construct(self, x):
+        return self.relu(x)
+
+
+def relu(x):
+    """
+    Computes rectified linear: max(features, 0).
+
+    Parameters
+    ----------
+    x : tensor
+        Must be one of the following types: float32, float64, int32, uint8, int16,
+        int8, int64, bfloat16, uint16, half, uint32, uint64, qint8.
+
+    Returns
+    -------
+        A Tensor. Has the same type as features.
+    """
+    outputs = P.ReLU()
+    return outputs(x)
+
+
+class ReLU6(Cell):
+
+    def __init__(self):
+        super(ReLU6, self).__init__()
+        self.relu6 = P.ReLU6()
+
+    def construct(self, x):
+        return self.relu6(x)
+
+
+def relu6(x):
+    """
+    Computes Rectified Linear 6: min(max(features, 0), 6).
+
+    Parameters
+    ----------
+    x : tensor
+        Must be one of the following types: float32, float64, int32, uint8, int16,
+        int8, int64, bfloat16, uint16, half, uint32, uint64, qint8.
+
+    Returns
+    -------
+        A Tensor with the same type as features.
+    """
+    outputs = P.ReLU6()
+    return outputs(x)
+
+
+class LeakyReLU(Cell):
+
+    def __init__(self, alpha=0.2):
+        super(LeakyReLU, self).__init__()
+        self.leakyrelu = ms.nn.LeakyReLU(alpha=alpha)
+
+    def construct(self, x):
+        return self.leakyrelu(x)
+
+
+def leaky_relu(x):
+    """
+    Compute the Leaky ReLU activation function.
+
+    Parameters
+    ----------
+    x : tensor
+        representing preactivation values. Must be one of the following types:
+        float16, float32, float64, int32, int64.
+
+    Returns
+    -------
+        The activation value.
+    """
+
+    pass
+
+
+class Softplus(Cell):
+
+    def __init__(self):
+        super(Softplus, self).__init__()
+        self.softplus = P.Softplus()
+
+    def construct(self, x):
+        return self.softplus(x)
+
+
+def softplus(x):
+    """
+    Computes softplus: log(exp(features) + 1).
+
+    Parameters
+    ----------
+    x : tensor
+        Must be one of the following types: half, bfloat16, float32, float64.
+
+    Returns
+    -------
+        A Tensor. Has the same type as features.
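+
+    Examples
+    --------
+    >>> # minimal sketch; `x` is any floating-point MindSpore tensor
+    >>> y = softplus(x)  # softplus(0.) == log(2.) ~= 0.693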
+    """
+
+    outputs = P.Softplus()
+    return outputs(x)
+
+
+class Tanh(Cell):
+
+    def __init__(self):
+        super(Tanh, self).__init__()
+        self.tanh = P.Tanh()
+
+    def construct(self, x):
+        return self.tanh(x)
+
+
+def tanh(x):
+    """
+    Computes hyperbolic tangent of x element-wise.
+
+    Parameters
+    ----------
+    x : tensor
+        Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128.
+
+    Returns
+    -------
+        A Tensor. Has the same type as x.
+    """
+
+    outputs = P.Tanh()
+    return outputs(x)
+
+
+class Sigmoid(Cell):
+
+    def __init__(self):
+        super(Sigmoid, self).__init__()
+        self.sigmoid = P.Sigmoid()
+
+    def construct(self, x):
+        return self.sigmoid(x)
+
+
+def sigmoid(x):
+    """
+    Computes sigmoid of x element-wise.
+
+    Parameters
+    ----------
+    x : tensor
+        A Tensor with type float16, float32, float64, complex64, or complex128.
+
+    Returns
+    -------
+        A Tensor with the same type as x.
+    """
+    outputs = P.Sigmoid()
+    return outputs(x)
+
+
+class Softmax(Cell):
+
+    def __init__(self):
+        super(Softmax, self).__init__()
+        self.softmax = P.Softmax()
+
+    def construct(self, x):
+        return self.softmax(x)
+
+
+def softmax(logits, axis=-1):
+    """
+    Computes softmax activations.
+
+    Parameters
+    ----------
+    logits : tensor
+        Must be one of the following types: half, float32, float64.
+    axis : int
+        The dimension softmax would be performed on. The default is -1 which indicates the last dimension.
+
+    Returns
+    -------
+        A Tensor. Has the same type and shape as logits.
+    """
+    outputs = P.Softmax(axis)
+    return outputs(logits)
+
+
+class Dropout(Cell):
+
+    def __init__(self, keep, seed=0):
+        super(Dropout, self).__init__()
+        self.dropout = P.Dropout(keep_prob=keep)
+        self.is_gpu = context.get_context('device_target') in ["GPU"]
+        self.get_shape = P.Shape()
+        self.dropout_gen_mask = P.DropoutGenMask(Seed0=seed, Seed1=0)
+        self.dropout_do_mask = P.DropoutDoMask()
+        self.cast = P.Cast()
+        self.keep_prob = keep
+
+    def construct(self, inputs):
+        if self.is_gpu:
+            outputs, _ = self.dropout(inputs)
+            return outputs
+        if self.keep_prob == 1:
+            return inputs
+        shape = self.get_shape(inputs)
+        dtype = P.DType()(inputs)
+        if self._is_float_dtype(dtype):
+            keep_prob = self.cast(self.keep_prob, dtype=dtype)
+        else:
+            keep_prob = self.cast(self.keep_prob, ms.float16)
+        output = self.dropout_gen_mask(shape, keep_prob)
+        return self.dropout_do_mask(inputs, output, keep_prob)
+
+    def _is_float_dtype(self, dtype):
+        if dtype in [ms.float32, ms.float16]:
+            return True
+        return False
+
+
+class BiasAdd(Cell):
+    """
+    Adds bias to value.
+
+    Parameters
+    ----------
+    x : tensor
+        A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128.
+    bias : tensor
+        Must be the same type as value unless value is a quantized type,
+        in which case a different quantized type may be used.
+
+    Returns
+    -------
+        A Tensor with the same type as value.
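+
+    Examples
+    --------
+    >>> # sketch: the bias length must equal the channel dimension of x
+    >>> bias_add = BiasAdd(data_format='channels_last')
+    >>> y = bias_add(x, b)  # x: (N, H, W, C), b: (C,)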
+    """
+
+    def __init__(self, data_format='channels_first'):
+        super(BiasAdd, self).__init__()
+        self.bias_add = P.BiasAdd()
+        if data_format in ['channels_first', 'NCW', 'NCHW', 'NCDHW']:
+            self.data_format = 'channels_first'
+        elif data_format in ['channels_last', 'NWC', 'NHWC', 'NDHWC']:
+            self.data_format = 'channels_last'
+        else:
+            raise Exception("Unsupported data format: " + str(data_format))
+
+    def construct(self, x, bias):
+        if self.data_format == 'channels_last':
+            x = nhwc_to_nchw(x)
+        outputs = self.bias_add(x, bias)
+        if self.data_format == 'channels_last':
+            outputs = nchw_to_nhwc(outputs)
+        return outputs
+
+
+def bias_add(x, bias):
+    """
+    Adds bias to value.
+
+    Parameters
+    ----------
+    x : tensor
+        A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128.
+    bias : tensor
+        Must be the same type as value unless value is a quantized type,
+        in which case a different quantized type may be used.
+
+    Returns
+    -------
+        A Tensor with the same type as value.
+    """
+    raise NotImplementedError
+
+
+class Conv1D(Cell):
+    pass
+    # raise NotImplementedError
+
+
+def conv1d(input, filters, stride, padding, data_format='NWC', dilations=None, name=None):
+    """
+    Computes a 1-D convolution given 3-D input and filter tensors.
+
+    Parameters
+    ----------
+    input : tensor
+        A 3D Tensor. Must be of type float16, float32, or float64.
+    filters : tensor
+        A 3D Tensor. Must have the same type as input.
+    stride : int or list
+        An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step.
+    padding : string
+        'SAME' or 'VALID'
+    data_format : string
+        An optional string from "NWC", "NCW". Defaults to "NWC", the data is stored in the order of
+        [batch, in_width, in_channels]. The "NCW" format stores data as [batch, in_channels, in_width].
+    dilations : int or list
+        An int or list of ints that has length 1 or 3 which defaults to 1.
+        The dilation factor for each dimension of input. If set to k > 1,
+        there will be k-1 skipped cells between each filter element on that dimension.
+        Dilations in the batch and depth dimensions must be 1.
+    name : string
+        A name for the operation (optional).
+
+    Returns
+    -------
+        A Tensor. Has the same type as input.
+    """
+
+    pass
+
+
+class Conv2D(Cell):
+
+    def __init__(self, strides, padding, data_format='NHWC', dilations=None, out_channel=None, k_size=None):
+        super(Conv2D, self).__init__()
+        self.data_format, self.padding = preprocess_2d_format(data_format, padding)
+
+        if self.data_format == 'NHWC':
+            self.ms_stride = strides[1]
+            self.ms_dilation = dilations[1]
+        elif self.data_format == 'NCHW':
+            self.ms_stride = strides[2]
+            self.ms_dilation = dilations[2]
+
+        self.conv2d = P.Conv2D(
+            out_channel=out_channel, kernel_size=k_size, pad_mode=self.padding, stride=self.ms_stride,
+            dilation=self.ms_dilation, mode=1, group=1
+        )
+
+    def construct(self, inputs, filters):
+        if self.data_format == 'NHWC':
+            inputs = nhwc_to_nchw(inputs)
+
+        outputs = self.conv2d(inputs, filters)
+
+        if self.data_format == 'NHWC':
+            outputs = nchw_to_nhwc(outputs)
+        return outputs
+
+
+def conv2d(input, filters, strides, padding, data_format='NCHW', dilations=None):
+    """
+    Computes a 2-D convolution given 4-D input and filters tensors.
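+    With "same" padding the spatial output size is ceil(in / stride); with
+    "valid" padding it is ceil((in - (k - 1) * dilation) / stride).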
+
+    Parameters
+    ----------
+    input : tensor
+        Must be one of the following types: half, bfloat16, float32, float64. A 4-D tensor.
+        The dimension order is interpreted according to the value of data_format, see below for details.
+    filters : tensor
+        Must have the same type as input. A 4-D tensor of shape [filter_height, filter_width, in_channels, out_channels]
+    strides : int or list
+        The stride of the sliding window for each dimension of input. If a single value is given it is replicated in the H and W dimension.
+        By default the N and C dimensions are set to 1. The dimension order is determined by the value of data_format, see below for details.
+    padding : string
+        "SAME" or "VALID"
+    data_format : string
+        "NHWC", "NCHW". Defaults to "NCHW".
+    dilations : list of ints
+        list of ints that has length 1, 2 or 4, defaults to 1. The dilation factor for each dimension of input.
+
+    Returns
+    -------
+    A Tensor. Has the same type as input.
+    """
+    raise NotImplementedError
+
+
+class Conv3D(Cell):
+    pass
+
+
+def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None, name=None):
+    """
+    Computes a 3-D convolution given 5-D input and filters tensors.
+
+    Parameters
+    ----------
+    input : tensor
+        Must be one of the following types: half, bfloat16, float32, float64.
+        Shape [batch, in_depth, in_height, in_width, in_channels].
+    filters : tensor
+        Must have the same type as input. Shape [filter_depth, filter_height, filter_width, in_channels, out_channels].
+        in_channels must match between input and filters.
+    strides : list of ints
+        A list of ints that has length >= 5. 1-D tensor of length 5.
+        The stride of the sliding window for each dimension of input.
+        Must have strides[0] = strides[4] = 1.
+    padding : string
+        A string from: "SAME", "VALID". The type of padding algorithm to use.
+    data_format : string
+        An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data.
+        With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels].
+        Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width].
+    dilations : list of ints
+        Defaults to [1, 1, 1, 1, 1]. 1-D tensor of length 5. The dilation factor for each dimension of input.
+        If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension.
+        The dimension order is determined by the value of data_format, see above for details.
+        Dilations in the batch and depth dimensions must be 1.
+    name : string
+        A name for the operation (optional).
+
+    Returns
+    -------
+    A Tensor. Has the same type as input.
+    """
+
+    raise NotImplementedError
+
+
+def lrn(inputs, depth_radius, bias, alpha, beta):
+    """
+    Local Response Normalization.
+
+    Parameters
+    ----------
+    inputs : tensor
+        Must be one of the following types: half, bfloat16, float32. 4-D.
+    depth_radius : int
+        Defaults to 5. 0-D. Half-width of the 1-D normalization window.
+    bias : float
+        Defaults to 1. An offset (usually positive to avoid dividing by 0).
+    alpha : float
+        Defaults to 1. A scale factor, usually positive.
+    beta : float
+        Defaults to 0.5. An exponent.
+
+    Returns
+    -------
+    A Tensor. Has the same type as input.
+    """
+    pass
+
+
+def moments(x, axes, shift=None, keepdims=False):
+    """
+    Calculates the mean and variance of x.
+
+    Parameters
+    ----------
+    x : tensor
+        A Tensor
+    axes : ints
+        Axes along which to compute mean and variance.
+    shift : int
+        Not used in the current implementation.
+    keepdims : bool
+        produce moments with the same dimensionality as the input.
+
+    Returns
+    -------
+    Two Tensor objects: mean and variance.
+    """
+
+    pass
+
+
+class MaxPool(Cell):
+
+    def __init__(self, ksize, strides, padding, data_format=None):
+        super(MaxPool, self).__init__()
+        self.data_format, self.padding = preprocess_2d_format(data_format=data_format, padding=padding)
+        ms_ksize = ksize[1]
+        ms_strides = strides[1]
+        self.maxpool = P.MaxPool(ksize=ms_ksize, strides=ms_strides, padding=self.padding)
+
+    def construct(self, inputs):
+        if self.data_format == 'NHWC':
+            inputs = nhwc_to_nchw(inputs)
+
+        outputs = self.maxpool(inputs)
+
+        if self.data_format == 'NHWC':
+            outputs = nchw_to_nhwc(outputs)
+        return outputs
+
+
+def max_pool(input, ksize, strides, padding, data_format=None):
+    """
+    Performs the max pooling on the input.
+
+    Parameters
+    ----------
+    input : tensor
+        Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] if data_format does not start
+        with "NC" (default), or [batch_size, num_channels] + input_spatial_shape if data_format starts with "NC".
+        Pooling happens over the spatial dimensions only.
+    ksize : int or list of ints
+        An int or list of ints that has length 1, N or N+2.
+        The size of the window for each dimension of the input tensor.
+    strides : int or list of ints
+        An int or list of ints that has length 1, N or N+2.
+        The stride of the sliding window for each dimension of the input tensor.
+    padding : string
+        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
+    data_format : string
+        'NHWC' and 'NCHW' are supported.
+
+    Returns
+    -------
+    A Tensor of format specified by data_format. The max pooled output tensor.
+    """
+    data_format, padding = preprocess_2d_format(data_format=data_format, padding=padding)
+    if data_format == 'NHWC':
+        input = nhwc_to_nchw(input)
+
+    ms_ksize = ksize[1]
+    ms_strides = strides[1]
+    outputs = P.MaxPool(ksize=ms_ksize, strides=ms_strides, padding=padding)(input)
+    # channel first back to channel last
+    if data_format == 'NHWC':
+        outputs = nchw_to_nhwc(outputs)
+    return outputs
+
+
+class AvgPool(Cell):
+
+    def __init__(self, ksize, strides, padding, data_format=None):
+        super(AvgPool, self).__init__()
+        self.data_format, self.padding = preprocess_2d_format(data_format=data_format, padding=padding)
+        ms_ksize = ksize[1]
+        ms_strides = strides[1]
+        self.avgpool = P.AvgPool(ksize=ms_ksize, strides=ms_strides, padding=self.padding)
+
+    def construct(self, inputs):
+        if self.data_format == 'NHWC':
+            inputs = nhwc_to_nchw(inputs)
+
+        outputs = self.avgpool(inputs)
+
+        if self.data_format == 'NHWC':
+            outputs = nchw_to_nhwc(outputs)
+        return outputs
+
+
+def avg_pool(input, ksize, strides, padding):
+    """
+    Performs the avg pooling on the input.
+
+    Parameters
+    ----------
+    input : tensor
+        Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels]
+        if data_format does not start with "NC" (default), or [batch_size, num_channels] + input_spatial_shape
+        if data_format starts with "NC". Pooling happens over the spatial dimensions only.
+    ksize : int or list of ints
+        An int or list of ints that has length 1, N or N+2.
+        The size of the window for each dimension of the input tensor.
+    strides : int or list of ints
+        An int or list of ints that has length 1, N or N+2.
+        The stride of the sliding window for each dimension of the input tensor.
+    padding : string
+        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
+
+    Returns
+    -------
+    A Tensor of format specified by data_format. The average pooled output tensor.
+    """
+    padding = padding_format(padding)
+    ms_ksize = ksize[1]
+    ms_strides = strides[1]
+    outputs = P.AvgPool(ksize=ms_ksize, strides=ms_strides, padding=padding)
+    return outputs(input)
+
+
+def max_pool3d(input, ksize, strides, padding, data_format=None, name=None):
+    """
+    Performs the max pooling on the input.
+
+    Parameters
+    ----------
+    input : tensor
+        A 5-D Tensor of the format specified by data_format.
+    ksize : int or list of ints
+        An int or list of ints that has length 1, 3 or 5.
+        The size of the window for each dimension of the input tensor.
+    strides : int or list of ints
+        An int or list of ints that has length 1, 3 or 5.
+        The stride of the sliding window for each dimension of the input tensor.
+    padding : string
+        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
+    data_format : string
+        "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data.
+        With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels].
+        Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width].
+    name : string
+        A name for the operation (optional).
+
+    Returns
+    -------
+    A Tensor of format specified by data_format. The max pooled output tensor.
+    """
+    pass
+
+
+def avg_pool3d(input, ksize, strides, padding, data_format=None, name=None):
+    """
+    Performs the average pooling on the input.
+
+    Parameters
+    ----------
+    input : tensor
+        A 5-D Tensor of shape [batch, depth, height, width, channels] and type float32, float64, qint8, quint8, or qint32.
+    ksize : int or list of ints
+        An int or list of ints that has length 1, 3 or 5. The size of the window for each dimension of the input tensor.
+    strides : int or list of ints
+        An int or list of ints that has length 1, 3 or 5.
+        The stride of the sliding window for each dimension of the input tensor.
+    padding : string
+        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
+    data_format : string
+        'NDHWC' and 'NCDHW' are supported.
+    name : string
+        Optional name for the operation.
+
+    Returns
+    -------
+    A Tensor with the same type as value. The average pooled output tensor.
+    """
+    pass
+
+
+def pool(input, window_shape, pooling_type, strides=None, padding='VALID', data_format=None, dilations=None, name=None):
+    """
+    Performs an N-D pooling operation.
+
+    Parameters
+    ----------
+    input : tensor
+        Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels]
+        if data_format does not start with "NC" (default), or [batch_size, num_channels] + input_spatial_shape
+        if data_format starts with "NC". Pooling happens over the spatial dimensions only.
+    window_shape : list of ints
+        Sequence of N ints >= 1.
+    pooling_type : string
+        Specifies pooling operation, must be "AVG" or "MAX".
+    strides : list of ints
+        Sequence of N ints >= 1. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1.
+    padding : string
+        The padding algorithm, must be "SAME" or "VALID". Defaults to "SAME".
+        See the "returns" section of tf.ops.convolution for details.
+ data_format : string + Specifies whether the channel dimension of the input and output is the last dimension (default, or if data_format does not start with "NC"), + or the second dimension (if data_format starts with "NC"). + For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". + For N=3, the valid values are "NDHWC" (default) and "NCDHW". + dilations : list of ints + Dilation rate. List of N ints >= 1. Defaults to [1]*N. If any value of dilation_rate is > 1, then all values of strides must be 1. + name : string + Optional. Name of the op. + + Returns + ------- + Tensor of rank N+2, of shape [batch_size] + output_spatial_shape + [num_channels] + """ + pass + + +class DepthwiseConv2d(Cell): + + def __init__(self, strides, padding, data_format=None, dilations=None, ksize=None, channel_multiplier=1): + super(DepthwiseConv2d, self).__init__() + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + self.ms_stride = strides[1] + self.ms_dilation = dilations[1] + self.depthwise_conv2d = P.DepthwiseConv2dNative( + channel_multiplier=channel_multiplier, kernel_size=ksize, stride=self.ms_stride, dilation=self.ms_dilation + ) + + def construct(self, input, filter): + if self.data_format == 'NHWC': + input = nhwc_to_nchw(input) + outputs = self.depthwise_conv2d(input, filter) + if self.data_format == 'NHWC': + outputs = nchw_to_nhwc(outputs) + return outputs + + +def depthwise_conv2d(input, filter, strides, padding, data_format=None, dilations=None, name=None): + """ + Depthwise 2-D convolution. + + Parameters + ---------- + input : tensor + 4-D with shape according to data_format. + filter : tensor + 4-D with shape [filter_height, filter_width, in_channels, channel_multiplier]. + strides : list + 1-D of size 4. The stride of the sliding window for each dimension of input. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + data_format : string + The data format for input. Either "NHWC" (default) or "NCHW". + dilations : list + 1-D of size 2. The dilation rate in which we sample input values across the height and width dimensions in atrous convolution. + If it is greater than 1, then all values of strides must be 1. + name : string + A name for this operation (optional). + + Returns + ------- + A 4-D Tensor with shape according to data_format. + E.g., for "NHWC" format, shape is [batch, out_height, out_width, in_channels * channel_multiplier]. + """ + + pass + + +def conv1d_transpose( + input, filters, output_shape, strides, padding='SAME', data_format='NWC', dilations=None, name=None +): + """ + The transpose of conv1d. + + Parameters + ---------- + input : tensor + A 3-D Tensor of type float and shape [batch, in_width, in_channels] + for NWC data format or [batch, in_channels, in_width] for NCW data format. + filters : tensor + A 3-D Tensor with the same type as value and shape [filter_width, output_channels, in_channels]. + filter's in_channels dimension must match that of value. + output_shape : tensor + A 1-D Tensor, containing three elements, representing the output shape of the deconvolution op. + strides : list + An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + data_format : string + 'NWC' and 'NCW' are supported. 
+    dilations : list
+        An int or list of ints that has length 1 or 3 which defaults to 1.
+        The dilation factor for each dimension of input. If set to k > 1,
+        there will be k-1 skipped cells between each filter element on that dimension.
+        Dilations in the batch and depth dimensions must be 1.
+    name : string
+        Optional name for the returned tensor.
+
+    Returns
+    -------
+    A Tensor with the same type as value.
+    """
+    pass
+
+
+def conv2d_transpose(
+    input, filters, output_shape, strides, padding='SAME', data_format='NHWC', dilations=None, name=None
+):
+    """
+    The transpose of conv2d.
+
+    Parameters
+    ----------
+    input : tensor
+        A 4-D Tensor of type float and shape [batch, height, width, in_channels]
+        for NHWC data format or [batch, in_channels, height, width] for NCHW data format.
+    filters : tensor
+        A 4-D Tensor with the same type as input and shape [height, width,
+        output_channels, in_channels]. filter's in_channels dimension must match that of input.
+    output_shape : tensor
+        A 1-D Tensor representing the output shape of the deconvolution op.
+    strides : list
+        An int or list of ints that has length 1, 2 or 4. The stride of the sliding window for each dimension of input.
+        If a single value is given it is replicated in the H and W dimension.
+        By default the N and C dimensions are set to 1.
+        The dimension order is determined by the value of data_format, see below for details.
+    padding : string
+        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
+    data_format : string
+        'NHWC' and 'NCHW' are supported.
+    dilations : list
+        An int or list of ints that has length 1, 2 or 4, defaults to 1.
+    name : string
+        Optional name for the returned tensor.
+
+    Returns
+    -------
+    A Tensor with the same type as input.
+    """
+    pass
+
+
+def conv3d_transpose(
+    input, filters, output_shape, strides, padding='SAME', data_format='NDHWC', dilations=None, name=None
+):
+    """
+    The transpose of conv3d.
+
+    Parameters
+    ----------
+    input : tensor
+        A 5-D Tensor of type float and shape [batch, in_depth, in_height, in_width, in_channels] for
+        NDHWC data format or [batch, in_channels, in_depth, in_height, in_width] for NCDHW data format.
+    filters : tensor
+        A 5-D Tensor with the same type as value and shape [filter_depth, filter_height, filter_width, output_channels, in_channels].
+        filter's in_channels dimension must match that of value.
+    output_shape : tensor
+        A 1-D Tensor representing the output shape of the deconvolution op.
+    strides : list
+        An int or list of ints that has length 1, 3 or 5.
+    padding : string
+        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
+    data_format : string
+        'NDHWC' and 'NCDHW' are supported.
+    dilations : list of ints
+        An int or list of ints that has length 1, 3 or 5, defaults to 1.
+    name : string
+        Optional name for the returned tensor.
+
+    Returns
+    -------
+    A Tensor with the same type as value.
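+
+    Examples
+    --------
+    Intended call pattern, as a sketch only (the function above is not implemented
+    yet, and the shapes below are illustrative assumptions):
+
+    >>> # input:   [batch, in_depth, in_height, in_width, in_channels]  for 'NDHWC'
+    >>> # filters: [filter_depth, filter_height, filter_width, output_channels, in_channels]
+    >>> # y = conv3d_transpose(input, filters, output_shape=[1, 8, 16, 16, 32],
+    >>> #                      strides=[1, 2, 2, 2, 1], padding='SAME')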
+    """
+
+    pass
+
+
+class BatchNorm(Cell):
+    """Batch Normalization base class."""
+
+    @cell_attr_register
+    def __init__(
+        self, num_features, epsilon=1e-5, decay=0.9, gamma=None, beta=None, moving_mean=None, moving_var=None,
+        is_train=None, device_num_each_group=1, data_format='channels_last'
+    ):
+        super(BatchNorm, self).__init__()
+        if num_features < 1:
+            raise ValueError("num_features must be at least 1")
+
+        if decay < 0 or decay > 1:
+            raise ValueError("decay should be a number in range [0, 1], but got {}".format(decay))
+
+        self.data_format = data_format
+        self.use_batch_statistics = is_train
+        self.num_features = num_features
+        self.eps = epsilon
+        self.moving_mean = moving_mean
+        self.moving_variance = moving_var
+        self.gamma = gamma
+        self.beta = beta
+        self.group = check_int_positive(device_num_each_group)
+        self.is_global = False
+        if self.group != 1:
+            self.rank_id = get_rank()
+            self.rank_size = get_group_size()
+            self.device_list = [i for i in range(0, self.rank_size)]
+            self.rank_list = self.list_group(self.device_list, self.group)
+            self.rank_list_idx = len(self.rank_list)
+            for i in range(self.rank_list_idx):
+                if self.rank_id in self.rank_list[i] and self.group != 1:
+                    self.is_global = True
+                    management.create_group('group' + str(i), self.rank_list[i])
+                    self.all_reduce = P.AllReduce(P.ReduceOp.SUM, 'group' + str(i)).add_prim_attr('fusion', 1)
+        self.shape = P.Shape()
+        self.reduce_mean = P.ReduceMean(keep_dims=True)
+        self.square = P.Square()
+        self.sqrt = P.Sqrt()
+        self.cast = P.Cast()
+        self.dtype = P.DType()
+        self.reshape = P.Reshape()
+        self.is_ascend = context.get_context("device_target") == "Ascend"
+        self.is_gpu = context.get_context("device_target") == "GPU"
+        self.is_graph_mode = context.get_context("mode") == context.GRAPH_MODE
+        self.momentum = 1.0 - decay
+        if context.get_context("enable_ge"):
+            self.is_ge_backend = True
+        else:
+            self.is_ge_backend = False
+
+        if self.is_graph_mode and (self.is_ge_backend or self.is_ascend):
+            self.bn_train = P.BatchNorm(is_training=True, epsilon=self.eps)
+        elif self.is_gpu:
+            self.bn_train = P.FusedBatchNormEx(mode=1, epsilon=self.eps, momentum=self.momentum)
+        else:
+            self.bn_train = P.FusedBatchNorm(mode=1, epsilon=self.eps, momentum=self.momentum)
+        self.bn_infer = P.BatchNorm(is_training=False, epsilon=self.eps)
+        self.enable_global_sync = self.is_global and (self.is_ge_backend or (self.is_graph_mode and self.is_ascend))
+        self.enable_default_train = self.is_graph_mode and not self.is_global and \
+                                    (self.is_ge_backend or self.is_ascend)
+
+        data_parallel_strategy = ((1, ), (1, ))
+        data_parallel_strategy_one = ((1, ), ())
+        self.sub_mean = P.Sub().shard(data_parallel_strategy)
+        self.sub_var = P.Sub().shard(data_parallel_strategy)
+        self.mul_mean = P.Mul().shard(data_parallel_strategy_one)
+        self.mul_var = P.Mul().shard(data_parallel_strategy_one)
+        self.assign_sub_mean = P.AssignSub().shard(data_parallel_strategy)
+        self.assign_sub_var = P.AssignSub().shard(data_parallel_strategy)
+
+    def _check_data_dim(self, x):
+        raise NotImplementedError
+
+    def list_group(self, world_rank, group_size):
+        if group_size > get_group_size():
+            raise ValueError(
+                "group size can not be greater than local rank size, group size is {}, "
+                "local_rank_size is {}".format(group_size, get_group_size())
+            )
+        if len(world_rank) % group_size != 0:
+            raise ValueError("group_size must evenly divide the number of ranks in world_rank.")
+        world_rank_list = zip(*(iter(world_rank), ) * group_size)
+        group_list = [list(i) for i in world_rank_list]
+        return group_list
+
+    def _global_sync(self, x, axes, re_shape):
+        """calculate global batch normalization output"""
+        x_mean = self.reduce_mean(x, axes)
+        x_mean_square = self.reduce_mean(self.square(x), axes)
+        global_batch_mean = self.all_reduce(x_mean) / self.group
+        global_batch_mean_square = self.all_reduce(x_mean_square) / self.group
+        global_mean = global_batch_mean
+        global_var = global_batch_mean_square - self.square(global_mean)
+        var_sqrt = self.sqrt(global_var + self.eps)
+        mean_first = (x - global_mean) / var_sqrt
+        y = mean_first * self.reshape(self.gamma, re_shape) + self.reshape(self.beta, re_shape)
+
+        mean_sub = self.sub_mean(self.reshape(self.moving_mean, re_shape), global_mean)
+        tmp_mean = self.mul_mean(mean_sub, self.cast(self.momentum, self.dtype(mean_sub)))
+        mean_sub2 = self.sub_var(self.reshape(self.moving_variance, re_shape), global_var)
+        tmp_variance = self.mul_var(mean_sub2, self.cast(self.momentum, self.dtype(mean_sub2)))
+        y = F.depend(y, self.assign_sub_mean(self.moving_mean, self.reshape(tmp_mean, self.shape(self.moving_mean))))
+        y = F.depend(
+            y, self.assign_sub_var(self.moving_variance, self.reshape(tmp_variance, self.shape(self.moving_variance)))
+        )
+        return y
+
+    def get_dim(self, input):
+        dim = len(self.shape(input))
+        if dim == 2:
+            return '1d'
+        elif dim == 4:
+            return '2d'
+        else:
+            raise ValueError("The input must have 2 or 4 dims.")
+
+    def _shape_check_bn(self, in_shape, in_dims):
+        dim = len(in_shape)
+        if in_dims == '1d' and dim != 2:
+            raise ValueError("The input must have 2 dims.")
+        if in_dims == '2d' and dim != 4:
+            raise ValueError("The input must have 4 dims.")
+        if in_dims == 'both' and dim != 2 and dim != 4:
+            raise ValueError("The input must have 2 or 4 dims.")
+
+    def _shape_infer(self, x_shape, num_feature):
+        """global batch normalization shape and axes infer"""
+        if len(x_shape) == 4:
+            axes = (0, 2, 3)
+            re_shape = (1, num_feature, 1, 1)
+        else:
+            axes = (0, )
+            re_shape = (1, num_feature)
+        return axes, re_shape
+
+    def construct(self, inputs):
+        x = inputs
+        self._shape_check_bn(self.shape(x), self.get_dim(x))
+        if self.use_batch_statistics is None:
+            flag = self.training
+        else:
+            flag = self.use_batch_statistics
+
+        if flag:
+            if self.enable_global_sync:
+                if self.data_format == 'channels_last' and self.get_dim(x) == '2d':
+                    x = nhwc_to_nchw(x)
+                axes, re_shape = self._shape_infer(F.shape(x), self.num_features)
+                y = self._global_sync(x, axes, re_shape)
+                if self.data_format == 'channels_last' and self.get_dim(x) == '2d':
+                    y = nchw_to_nhwc(y)
+                return y
+
+            if self.enable_default_train:
+                if self.data_format == 'channels_last' and self.get_dim(x) == '2d':
+                    x = nhwc_to_nchw(x)
+                y, batch_mean, batch_var, _, _ = self.bn_train(x, self.gamma, self.beta, None, None)
+
+                mean_sub = self.sub_mean(self.moving_mean, batch_mean)
+                temp_mean = self.mul_mean(mean_sub, self.momentum)
+                mean_sub2 = self.sub_var(self.moving_variance, batch_var)
+                temp_variance = self.mul_var(mean_sub2, self.momentum)
+                y = F.depend(y, self.assign_sub_mean(self.moving_mean, temp_mean))
+                y = F.depend(y, self.assign_sub_var(self.moving_variance, temp_variance))
+                if self.data_format == 'channels_last' and self.get_dim(x) == '2d':
+                    y = nchw_to_nhwc(y)
+                return y
+
+            if self.data_format == 'channels_last' and self.get_dim(x) == '2d':
+                x = nhwc_to_nchw(x)
+            y = self.bn_train(x, self.gamma, self.beta, self.moving_mean, self.moving_variance)[0]
+            if self.data_format == 'channels_last' and self.get_dim(x) == '2d':
+                y = nchw_to_nhwc(y)
+            return y
+        if self.data_format == 'channels_last' and self.get_dim(x) == '2d':
+            x = nhwc_to_nchw(x)
+        y = self.bn_infer(x, self.gamma, self.beta, self.moving_mean, self.moving_variance)[0]
+        if self.data_format == 'channels_last' and self.get_dim(x) == '2d':
+            y = nchw_to_nhwc(y)
+        return y
diff --git a/tensorlayer/backend/ops/tensorflow_backend.py b/tensorlayer/backend/ops/tensorflow_backend.py
new file mode 100644
index 000000000..2f381fb4d
--- /dev/null
+++ b/tensorlayer/backend/ops/tensorflow_backend.py
@@ -0,0 +1,956 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, division, print_function
+from .tensorflow_nn import nchw_to_nhwc, nhwc_to_nchw
+import tensorflow as tf
+
+_dtypeDict = {
+    'DType': tf.DType,
+    'float16': tf.float16,
+    'float32': tf.float32,
+    'float64': tf.float64,
+    'int8': tf.int8,
+    'int16': tf.int16,
+    'int32': tf.int32,
+    'int64': tf.int64,
+    'uint8': tf.uint8,
+    'uint16': tf.uint16,
+    'uint32': tf.uint32,
+    'uint64': tf.uint64
+}
+
+DType = tf.DType
+float16 = tf.float16
+float32 = tf.float32
+float64 = tf.float64
+int8 = tf.int8
+int16 = tf.int16
+int32 = tf.int32
+int64 = tf.int64
+uint8 = tf.uint8
+uint16 = tf.uint16
+uint32 = tf.uint32
+uint64 = tf.uint64
+
+
+def set_context(**kwargs):
+    raise Exception("Using TensorFlow backend, you don't need to set context")
+
+
+def get_tensor_shape(x):
+    return x.get_shape().as_list()
+
+
+# initializers
+def zeros(shape, dtype=tf.float32):
+    """
+    Creates a tensor with all elements set to zero.
+
+    Parameters
+    ----------
+    shape : A list of integers
+        a tuple of integers, or a 1-D Tensor of type int32.
+    dtype : tensor
+        The DType of an element in the resulting Tensor
+
+    Returns
+    -------
+    A Tensor with all elements set to zero.
+
+    """
+    return tf.zeros(shape=shape, dtype=dtype)
+
+
+def ones(shape, dtype=tf.float32):
+    """
+    Creates a tensor with all elements set to ones.
+
+    Parameters
+    ----------
+    shape : A list of integers
+        a tuple of integers, or a 1-D Tensor of type int32.
+    dtype : tensor
+        The DType of an element in the resulting Tensor
+
+    Returns
+    -------
+    A Tensor with all elements set to one.
+
+    """
+    return tf.ones(shape=shape, dtype=dtype)
+
+
+def constant(value, dtype=tf.float32, shape=None):
+    """
+    Creates a constant tensor from a tensor-like object.
+
+    Parameters
+    ----------
+    value : list
+        A constant value (or list) of output type dtype.
+    dtype : tensor
+        The type of the elements of the resulting tensor.
+    shape : tuple
+        Optional dimensions of resulting tensor.
+
+    Returns
+    -------
+    A Constant Tensor.
+
+    """
+    return tf.constant(value=value, dtype=dtype, shape=shape)
+
+
+def random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None):
+    """
+    Outputs random values from a uniform distribution.
+
+    Parameters
+    ----------
+    shape : tuple
+        A 1-D integer Tensor or Python array. The shape of the output tensor.
+    minval : int
+        The lower bound on the range of random values to generate (inclusive). Defaults to 0.
+    maxval : int
+        The upper bound on the range of random values to generate (exclusive). Defaults to 1 if dtype is floating point.
+    dtype : tensor
+        The type of the output: float16, float32, float64, int32, or int64.
+    seed : int
+        Used in combination with tf.random.set_seed to create a reproducible sequence of tensors across multiple calls.
+
+    Returns
+    -------
+    A tensor of the specified shape filled with random uniform values.
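+
+    Examples
+    --------
+    An illustrative call (shape, bounds and seed below are arbitrary):
+
+    >>> import tensorflow as tf
+    >>> x = random_uniform((2, 3), minval=0, maxval=1, dtype=tf.float32, seed=42)
+    >>> x.shape
+    TensorShape([2, 3])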
+
+    """
+    outputs = tf.random.uniform(shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
+    return outputs
+
+
+def random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.dtypes.float32, seed=None):
+    """
+    Outputs random values from a normal distribution.
+
+    Parameters
+    ----------
+    shape : tuple
+        A 1-D integer Tensor or Python array. The shape of the output tensor.
+    mean : float
+        The mean of the normal distribution.
+    stddev : float
+        The standard deviation of the normal distribution.
+    dtype : tensor
+        The type of the output.
+    seed : A Python integer
+        Used to create a random seed for the distribution.
+
+    Returns
+    -------
+    A tensor of the specified shape filled with random normal values.
+
+    """
+    outputs = tf.random.normal(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
+    return outputs
+
+
+def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None):
+    """
+    Outputs random values from a truncated normal distribution.
+
+    Parameters
+    ----------
+    shape : tuple
+        A 1-D integer Tensor or Python array. The shape of the output tensor.
+    mean : float
+        The mean of the normal distribution.
+    stddev : float
+        The standard deviation of the normal distribution.
+    dtype : tensor
+        The type of the output.
+    seed : A Python integer
+        Used to create a random seed for the distribution.
+
+    Returns
+    -------
+    A tensor of the specified shape filled with random truncated normal values.
+
+    """
+    outputs = tf.random.truncated_normal(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
+    return outputs
+
+
+def he_normal(shape, dtype, seed=None):
+    """
+    He normal initializer.
+
+    Parameters
+    ----------
+    shape : tuple
+        A 1-D integer Tensor or Python array. The shape of the output tensor.
+    dtype : tensor
+        The type of the output.
+    seed : A Python integer
+        Used to seed the random generator.
+
+    Returns
+    -------
+    A tensor of the specified shape filled with he normal values.
+    """
+    return tf.initializers.he_normal(seed)(shape=shape, dtype=dtype)
+
+
+def Variable(initial_value, name, trainable=True):
+    """
+    Creates a new variable with value initial_value.
+
+    Parameters
+    ----------
+    initial_value : tensor
+        A Tensor, or Python object convertible to a Tensor
+    name : str
+        Optional name for the variable. Defaults to 'Variable' and gets uniquified automatically.
+    trainable : bool
+        If True, the variable is included in the set of trainable variables. Defaults to True.
+
+    Returns
+    -------
+    Variable
+    """
+
+    var = tf.Variable(initial_value=initial_value, name=name, trainable=trainable)
+    return var
+
+
+class MatMul(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, a, b):
+        return tf.matmul(a, b)
+
+
+def matmul(a, b):
+    """
+    Multiplies matrix a by matrix b, producing a * b.
+
+    Parameters
+    ----------
+    a : tensor
+        type float16, float32, float64, int32, complex64, complex128 and rank > 1.
+    b : tensor
+        with same type and rank as a.
+
+    Returns
+    -------
+    A Tensor of the same type as a and b
+    """
+
+    outputs = tf.matmul(a, b)
+    return outputs
+
+
+def add(value, bias):
+    """
+    Returns value + bias element-wise.
+
+    Parameters
+    ----------
+    value : tensor
+        Must be one of the following types: bfloat16, half, float32, float64,
+        uint8, int8, int16, int32, int64, complex64, complex128, string.
+    bias : tensor
+        Must have the same type as value.
+
+    Returns
+    -------
+    A Tensor. Has the same type as value.
+    """
+
+    outputs = tf.add(value, bias)
+    return outputs
+
+
+def dtypes(dt):
+    """
+    Data dtypes.
+ + Parameters + ---------- + dt : string + It could be 'uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16', + 'int32', 'int64', 'float16', 'float32', 'float64', 'DType'. + + Returns + ------- + Data dtypes + """ + + if dt not in _dtypeDict.keys(): + raise Exception("Unsupported dtype: {}".format(dt)) + return _dtypeDict[dt] + + +def minimum(x, y): + """ + Returns the min of x and y (i.e. x < y ? x : y) element-wise. + + Parameters + ---------- + x : tensor. + Must be one of the following types: bfloat16, half, float32, float64, int32, int64. + y : A Tensor. + Must have the same type as x. + + Returns + ------- + A Tensor. Has the same type as x + """ + + outputs = tf.minimum(x=x, y=y) + return outputs + + +class FlattenReshape(object): + + def __init__(self): + pass + + def __call__(self, inputs): + dim = 1 + for d in get_tensor_shape(inputs)[1:]: + dim *= d + return tf.reshape(inputs, [-1, dim]) + + +class Reshape(object): + + def __init__(self, shape): + self.shape = shape + + def __call__(self, tensor): + return tf.reshape(tensor, self.shape) + + +def reshape(tensor, shape): + """ + Reshapes a tensor. + + Parameters + ---------- + tensor : tensor + A Tensor. + shape : tensor + Defines the shape of the output tensor. + Returns + ------- + A Tensor. Has the same type as tensor + """ + + return tf.reshape(tensor, shape) + + +class Concat(object): + + def __init__(self, axis): + super(Concat, self).__init__() + self.axis = axis + + def __call__(self, values): + return tf.concat(values=values, axis=self.axis) + + +def concat(values, axis): + """ + Concatenates tensors along one dimension. + + Parameters + ---------- + values : list + A list of Tensor objects or a single Tensor + axis : int + 0-D int32 Tensor. Dimension along which to concatenate + Returns + ------- + A Tensor resulting from concatenation of the input tensors. + """ + + return tf.concat(values, axis) + + +def convert_to_tensor(value, dtype=None): + """ + Converts the given value to a Tensor. + + Parameters + ---------- + value : object + An object whose type has a registered Tensor conversion function. + dtype : optional + Optional element type for the returned tensor. If missing, the type is inferred from the type of value. + + Returns + ------- + A Tensor based on value. + """ + + return tf.convert_to_tensor(value, dtype) + + +def sqrt(x): + """ + Computes square root of x element-wise. + + Parameters + ---------- + x : tensor + Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128. + + Returns + ------- + A Tensor. Has the same type as x. + """ + return tf.sqrt(x) + + +class ReduceSum(object): + + def __init__(self, axis): + self.axis = axis + + def __call__(self, input): + return tf.reduce_sum(input, axis=self.axis) + + +class ReduceMean(object): + + def __init__(self, axis): + self.axis = axis + + def __call__(self, inputs): + output = tf.reduce_mean(inputs, self.axis) + return output + + +def reduce_mean(input_tensor, axis=None): + """ + Computes the mean of elements across dimensions of a tensor. + + Parameters + ---------- + input_tensor : tensor + The tensor to reduce. Should have numeric type. + axis : int + The dimensions to reduce. If None (the default), reduces all dimensions. + Must be in the range [-rank(input_tensor), rank(input_tensor)). + name : str + A name for the operation (optional). + + Returns + ------- + The reduced tensor. 
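+
+    Examples
+    --------
+    An illustrative call (input values chosen arbitrarily):
+
+    >>> import tensorflow as tf
+    >>> t = tf.constant([[1., 2.], [3., 4.]])
+    >>> reduce_mean(t)           # 2.5: mean over all elements
+    >>> reduce_mean(t, axis=0)   # [2., 3.]: mean down each column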
+ """ + + return tf.reduce_mean(input_tensor, axis=axis) + + +class ReduceMax(object): + + def __init__(self, axis): + self.axis = axis + + def __call__(self, inputs): + output = tf.reduce_max(inputs, self.axis) + return output + + +def reduce_max(input_tensor, axis=None): + """ + Computes the maximum of elements across dimensions of a tensor. + + Parameters + ---------- + input_tensor : tensor + The tensor to reduce. Should have real numeric type. + axis : int + The dimensions to reduce. If None (the default), reduces all dimensions. + Must be in the range [-rank(input_tensor), rank(input_tensor)). + name : str + A name for the operation (optional). + + Returns + ------- + The reduced tensor. + """ + + return tf.reduce_max(input_tensor, axis=axis) + + +def reduce_min(input_tensor, axis=None): + """ + Computes the minimum of elements across dimensions of a tensor. + + Parameters + ---------- + input_tensor : tensor + The tensor to reduce. Should have real numeric type. + axis : int + The dimensions to reduce. If None (the default), reduces all dimensions. + Must be in the range [-rank(input_tensor), rank(input_tensor)). + name : str + A name for the operation (optional). + + Returns + ------- + The reduced tensor. + """ + + return tf.reduce_min(input_tensor, axis=axis) + + +def pad(tensor, paddings, mode='CONSTANT', constant_values=0): + """ + Pads a tensor. + + Parameters + ---------- + tensor : tensor + A Tensor. + paddings : tensor + A Tensor of type int32. + mode : str + One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive) + constant_values : int + In "CONSTANT" mode, the scalar pad value to use. Must be same type as tensor. + + Returns + ------- + A Tensor. Has the same type as tensor. + """ + + if mode not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']: + raise Exception("Unsupported mode: {}".format(mode)) + outputs = tf.pad(tensor, paddings, mode=mode, constant_values=constant_values) + return outputs + + +class Unstack(object): + + def __init__(self, axis, num=None): + self.axis = axis + self.num = num + + def __call__(self, values): + return tf.unstack(values, num=self.num, axis=self.axis) + + +class Stack(object): + + def __init__(self, axis): + self.axis = axis + + def __call__(self, values): + return tf.stack(values, axis=self.axis) + + +def stack(values, axis=0): + """ + Stacks a list of rank-R tensors into one rank-(R+1) tensor. + + Parameters + ---------- + values : list + A list of Tensor objects with the same shape and type. + axis : int + An int. The axis to stack along. Defaults to the first dimension. + Negative values wrap around, so the valid range is [-(R+1), R+1). + + Returns + ------- + A stacked Tensor with the same type as values. + """ + + return tf.stack(values, axis=axis) + + +def meshgrid(x, y): + """ + Broadcasts parameters for evaluation on an N-D grid. + + Parameters + ---------- + x : tensor + Tensors with rank 1. + y : tensor + Tensors with rank 1. + + Returns + ------- + A list of N Tensors with rank N. + """ + + return tf.meshgrid(x, y) + + +def range(start, limit=None, delta=1, dtype=None): + """ + Creates a sequence of numbers. + + Parameters + ---------- + start : tensor + A 0-D Tensor (scalar). Acts as first entry in the range if limit is not None; + otherwise, acts as range limit and first entry defaults to 0. + limit : tensor + A 0-D Tensor (scalar). Upper limit of sequence, exclusive. If None, + defaults to the value of start while the first entry of the range defaults to 0. + delta : tensor + A 0-D Tensor (scalar). 
Number that increments start. Defaults to 1.
+    dtype : type
+        The type of the elements of the resulting tensor.
+
+    Returns
+    -------
+    A 1-D Tensor of type dtype.
+    """
+
+    if limit is None:
+        outputs = tf.range(start, delta=delta, dtype=dtype)
+    else:
+        outputs = tf.range(start, limit, delta=delta, dtype=dtype)
+    return outputs
+
+
+class ExpandDims(object):
+
+    def __init__(self, axis):
+        self.axis = axis
+
+    def __call__(self, input):
+        return tf.expand_dims(input, axis=self.axis)
+
+
+def expand_dims(input, axis):
+    """
+    Inserts a dimension of 1 into a tensor's shape.
+
+    Parameters
+    ----------
+    input : tensor
+        A Tensor.
+    axis : int
+        0-D (scalar). Specifies the dimension index at which to expand the shape of input.
+        Must be in the range [-rank(input) - 1, rank(input)].
+
+    Returns
+    -------
+    A Tensor with the same data as input, but its shape has an additional dimension of size 1 added.
+    """
+
+    return tf.expand_dims(input, axis)
+
+
+class Tile(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, input, multiples):
+        return tf.tile(input, multiples)
+
+
+def tile(input, multiples):
+    """
+    Constructs a tensor by tiling a given tensor.
+
+    Parameters
+    ----------
+    input : tensor
+        A Tensor. 1-D or higher.
+    multiples : tensor
+        Must be one of the following types: int32, int64. 1-D.
+        Length must be the same as the number of dimensions in input
+
+    Returns
+    -------
+    A Tensor. Has the same type as input.
+    """
+
+    return tf.tile(input, multiples)
+
+
+class Cast(object):
+
+    def __init__(self, dtype):
+        self.dtype = dtype
+
+    def __call__(self, x):
+        return tf.cast(x, dtype=self.dtype)
+
+
+def cast(x, dtype):
+    """
+    Casts a tensor to a new type.
+
+    Parameters
+    ----------
+    x : tensor
+        A Tensor or SparseTensor or IndexedSlices of numeric type.
+        It could be uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64.
+    dtype : dtype
+        The destination type. The list of supported dtypes is the same as x
+
+    Returns
+    -------
+    A Tensor or SparseTensor or IndexedSlices with same shape as x and same type as dtype.
+    """
+
+    return tf.cast(x, dtype=dtype)
+
+
+class Transpose(object):
+
+    def __init__(self, perm, conjugate=False):
+        self.perm = perm
+        self.conjugate = conjugate
+
+    def __call__(self, a):
+        return tf.transpose(a, self.perm, self.conjugate)
+
+
+def transpose(a, perm=None, conjugate=False):
+    """
+    Transposes a.
+
+    Parameters
+    ----------
+    a : tensor
+        A Tensor.
+    perm : list of ints
+        A permutation of the dimensions of a.
+    conjugate : bool
+        Setting it to True is mathematically equivalent to tf.math.conj(tf.transpose(input)).
+
+    Returns
+    -------
+    A transposed Tensor.
+    """
+
+    return tf.transpose(a, perm, conjugate)
+
+
+def gather_nd(params, indices, batch_dims=0):
+    """
+    Gather slices from params into a Tensor with shape specified by indices.
+
+    Parameters
+    ----------
+    params : tensor
+        The tensor from which to gather values.
+    indices : tensor
+        Must be one of the following types: int32, int64. Index tensor.
+    batch_dims : int
+        An integer or a scalar 'Tensor'. The number of batch dimensions.
+
+    Returns
+    -------
+    A Tensor. Has the same type as params.
+    """
+
+    return tf.gather_nd(params, indices, batch_dims)
+
+
+def clip_by_value(t, clip_value_min, clip_value_max):
+    """
+    Clips tensor values to a specified min and max.
+
+    Parameters
+    ----------
+    t : tensor
+        A Tensor or IndexedSlices
+    clip_value_min : tensor
+        A 0-D (scalar) Tensor, or a Tensor with the same shape as t.
+        The minimum value to clip by.
+    clip_value_max : tensor
+        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The maximum value to clip by.
+
+    Returns
+    -------
+    A clipped Tensor or IndexedSlices.
+    """
+
+    return tf.clip_by_value(t, clip_value_min, clip_value_max)
+
+
+def split(value, num_or_size_splits, axis=0, num=None):
+    """
+    Splits a tensor into sub tensors.
+
+    Parameters
+    ----------
+    value : tensor
+        The Tensor to split.
+    num_or_size_splits : list
+        Either an integer indicating the number of splits along split_dim or a 1-D integer Tensor or
+        Python list containing the sizes of each output tensor along split_dim.
+    axis : int
+        The dimension along which to split. Must be in the range [-rank(value), rank(value)). Defaults to 0.
+    num : int
+        used to specify the number of outputs when it cannot be inferred from the shape of size_splits.
+
+    Returns
+    -------
+    Tensor objects resulting from splitting value.
+    """
+
+    return tf.split(value=value, num_or_size_splits=num_or_size_splits, axis=axis, num=num)
+
+
+def floor(x):
+    return tf.floor(x)
+
+
+def gather(params, indices):
+    return tf.gather(params, indices)
+
+
+def linspace(start, stop, num):
+    return tf.linspace(start, stop, num)
+
+
+def slice(inputs, starts, sizes):
+    return tf.slice(inputs, starts, sizes)
+
+
+def add_n(inputs):
+    return tf.add_n(inputs)
+
+
+class OneHot(object):
+
+    def __init__(self, depth, on_value, off_value, axis, dtype):
+        self.depth = depth
+        self.on_value = on_value
+        self.off_value = off_value
+        self.axis = axis
+        self.dtype = dtype
+
+    def __call__(self, inputs, *args, **kwargs):
+        outputs = tf.one_hot(
+            inputs, self.depth, on_value=self.on_value, off_value=self.off_value, axis=self.axis, dtype=self.dtype
+        )
+        return outputs
+
+
+class L2Normalize(object):
+
+    def __init__(self, axis=None, epsilon=1e-12):
+        self.axis = axis
+        self.epsilon = epsilon
+
+    def __call__(self, input, *args, **kwargs):
+        outputs = tf.math.l2_normalize(input, axis=self.axis, epsilon=self.epsilon)
+        return outputs
+
+
+class EmbeddingLookup(object):
+
+    def __init__(self, max_norm=None):
+        self.max_norm = max_norm
+
+    def __call__(self, params, ids, *args, **kwargs):
+        outputs = tf.nn.embedding_lookup(params=params, ids=ids, max_norm=self.max_norm)
+        return outputs
+
+
+class NCELoss(object):
+
+    def __init__(self, num_true=1, sampled_values=None, remove_accidental_hits=False):
+        self.num_true = num_true
+        self.sampled_values = sampled_values
+        self.remove_accidental_hits = remove_accidental_hits
+
+    def __call__(self, weights, biases, labels, inputs, num_sampled, num_classes):
+        outputs = tf.nn.nce_loss(
+            weights=weights, biases=biases, inputs=inputs, labels=labels, num_sampled=num_sampled,
+            num_classes=num_classes
+        )
+        return outputs
+
+
+class Not_equal(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, x, y):
+        return tf.not_equal(x, y)
+
+
+class Count_nonzero(object):
+
+    def __init__(self, keepdims=None, dtype=int64):
+        self.keepdims = keepdims
+        self.dtype = dtype
+
+    def __call__(self, input, axis=None):
+        return tf.math.count_nonzero(input, axis=axis, keepdims=self.keepdims, dtype=self.dtype)
+
+
+class Resize:
+
+    def __init__(self, scale, method, antialias=False, data_format='channels_last', ksize=None):
+        self.method = method
+        self.antialias = antialias
+        self.scale = scale
+        self.data_format = data_format
+
+    def __call__(self, inputs):
+        if self.data_format == 'channels_first':
+            inputs = nchw_to_nhwc(inputs)
+        if len(get_tensor_shape(inputs)) == 4:
+            output_size = [int(inputs.shape[1] * self.scale[0]), int(inputs.shape[2] * self.scale[1])]
+        else:
+            raise ValueError("The input must be a 4-D Tensor.")
+        outputs = tf.image.resize(inputs, size=output_size, method=self.method, antialias=self.antialias)
+        if self.data_format == 'channels_first':
+            outputs = nhwc_to_nchw(outputs)
+        return outputs
+
+
+def resize(inputs, output_size, method, antialias):
+    return tf.image.resize(inputs, size=output_size, method=method, antialias=antialias)
+
+
+class ZeroPadding1D(object):
+
+    def __init__(self, padding):
+        self.zeropad = tf.keras.layers.ZeroPadding1D(padding=padding)
+
+    def __call__(self, inputs):
+        return self.zeropad(inputs)
+
+
+class ZeroPadding2D(object):
+
+    def __init__(self, padding):
+        self.zeropad = tf.keras.layers.ZeroPadding2D(padding=padding)
+
+    def __call__(self, inputs):
+        return self.zeropad(inputs)
+
+
+class ZeroPadding3D(object):
+
+    def __init__(self, padding):
+        self.zeropad = tf.keras.layers.ZeroPadding3D(padding=padding)
+
+    def __call__(self, inputs):
+        return self.zeropad(inputs)
+
+
+class Sign(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, x):
+        return tf.sign(x)
diff --git a/tensorlayer/backend/ops/tensorflow_nn.py b/tensorlayer/backend/ops/tensorflow_nn.py
new file mode 100644
index 000000000..5e2d386c5
--- /dev/null
+++ b/tensorlayer/backend/ops/tensorflow_nn.py
@@ -0,0 +1,1299 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import tensorflow as tf
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.training import moving_averages
+
+# loss function
+sparse_softmax_cross_entropy_with_logits = tf.nn.sparse_softmax_cross_entropy_with_logits
+sigmoid_cross_entropy_with_logits = tf.nn.sigmoid_cross_entropy_with_logits
+
+
+def padding_format(padding):
+    """
+    Checks that the padding is one of the supported formats and normalizes it.
+
+    Parameters
+    ----------
+    padding : str
+        Must be one of the following: "same", "SAME", "VALID", "valid"
+
+    Returns
+    -------
+    str "SAME" or "VALID"
+    """
+
+    if padding in ["SAME", "same"]:
+        padding = "SAME"
+    elif padding in ["VALID", "valid"]:
+        padding = "VALID"
+    elif padding is None:
+        padding = None
+    else:
+        raise Exception("Unsupported padding: " + str(padding))
+    return padding
+
+
+def preprocess_1d_format(data_format, padding):
+    """
+    Checks that the 1-D data format and padding are supported and normalizes them.
+
+    Parameters
+    ----------
+    data_format : str
+        Must be one of the following: "channels_last", "NWC", "NCW", "channels_first"
+    padding : str
+        Must be one of the following: "same", "valid", "SAME", "VALID"
+
+    Returns
+    -------
+    str "NWC" or "NCW" and "SAME" or "VALID"
+    """
+    if data_format in ["channels_last", "NWC"]:
+        data_format = "NWC"
+    elif data_format in ["channels_first", "NCW"]:
+        data_format = "NCW"
+    elif data_format is None:
+        data_format = None
+    else:
+        raise Exception("Unsupported data format: " + str(data_format))
+    padding = padding_format(padding)
+    return data_format, padding
+
+
+def preprocess_2d_format(data_format, padding):
+    """
+    Checks that the 2-D data format and padding are supported and normalizes them.
+
+    Parameters
+    ----------
+    data_format : str
+        Must be one of the following: "channels_last", "NHWC", "NCHW", "channels_first"
+    padding : str
+        Must be one of the following: "same", "valid", "SAME", "VALID"
+
+    Returns
+    -------
+    str "NHWC" or "NCHW" and "SAME" or "VALID"
+    """
+
+    if data_format in ["channels_last", "NHWC"]:
+        data_format = "NHWC"
+    elif data_format in ["channels_first", "NCHW"]:
+        data_format = "NCHW"
+    elif data_format is None:
+        data_format = None
+    else:
+        raise Exception("Unsupported data format: " + str(data_format))
+    padding = padding_format(padding)
+    return data_format, padding
+
+
+def preprocess_3d_format(data_format, padding):
+    """
+    Checks that the 3-D data format and padding are supported and normalizes them.
+
+    Parameters
+    ----------
+    data_format : str
+        Must be one of the following: "channels_last", "NDHWC", "NCDHW", "channels_first"
+    padding : str
+        Must be one of the following: "same", "valid", "SAME", "VALID"
+
+    Returns
+    -------
+    str "NDHWC" or "NCDHW" and "SAME" or "VALID"
+    """
+
+    if data_format in ['channels_last', 'NDHWC']:
+        data_format = 'NDHWC'
+    elif data_format in ['channels_first', 'NCDHW']:
+        data_format = 'NCDHW'
+    elif data_format is None:
+        data_format = None
+    else:
+        raise Exception("Unsupported data format: " + str(data_format))
+    padding = padding_format(padding)
+    return data_format, padding
+
+
+def nchw_to_nhwc(x):
+    """
+    Channels first to channels last
+
+    Parameters
+    ----------
+    x : tensor
+        channels first tensor data
+
+    Returns
+    -------
+    channels last tensor data
+    """
+
+    if len(x.shape) == 3:
+        x = tf.transpose(x, (0, 2, 1))
+    elif len(x.shape) == 4:
+        x = tf.transpose(x, (0, 2, 3, 1))
+    elif len(x.shape) == 5:
+        x = tf.transpose(x, (0, 2, 3, 4, 1))
+    else:
+        raise Exception("Unsupported dimensions")
+    return x
+
+
+def nhwc_to_nchw(x):
+    """
+    Channels last to channels first
+
+    Parameters
+    ----------
+    x : tensor
+        channels last tensor data
+
+    Returns
+    -------
+    channels first tensor data
+    """
+
+    if len(x.shape) == 3:
+        x = tf.transpose(x, (0, 2, 1))
+    elif len(x.shape) == 4:
+        x = tf.transpose(x, (0, 3, 1, 2))
+    elif len(x.shape) == 5:
+        x = tf.transpose(x, (0, 4, 1, 2, 3))
+    else:
+        raise Exception("Unsupported dimensions")
+    return x
+
+
+class ReLU(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, x):
+        return tf.nn.relu(x)
+
+
+def relu(x):
+    """
+    Computes rectified linear: max(features, 0).
+
+    Parameters
+    ----------
+    x : tensor
+        Must be one of the following types: float32, float64, int32, uint8, int16,
+        int8, int64, bfloat16, uint16, half, uint32, uint64, qint8.
+
+    Returns
+    -------
+    A Tensor. Has the same type as features.
+    """
+
+    return tf.nn.relu(x)
+
+
+class ReLU6(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, x):
+        return tf.nn.relu6(x)
+
+
+def relu6(x):
+    """
+    Computes Rectified Linear 6: min(max(features, 0), 6).
+
+    Parameters
+    ----------
+    x : tensor
+        Must be one of the following types: float32, float64, int32, uint8, int16,
+        int8, int64, bfloat16, uint16, half, uint32, uint64, qint8.
+
+    Returns
+    -------
+    A Tensor with the same type as features.
+    """
+
+    return tf.nn.relu6(x)
+
+
+class LeakyReLU(object):
+
+    def __init__(self, alpha=0.2):
+        self.alpha = alpha
+
+    def __call__(self, x):
+        return tf.nn.leaky_relu(x, alpha=self.alpha)
+
+
+def leaky_relu(x):
+    """
+    Compute the Leaky ReLU activation function.
+
+    Parameters
+    ----------
+    x : tensor
+        representing preactivation values. Must be one of the following types:
+        float16, float32, float64, int32, int64.
+ + Returns + ------- + The activation value. + """ + + return tf.nn.leaky_relu(x) + + +class Softplus(object): + + def __init__(self): + pass + + def __call__(self, x): + return tf.nn.softplus(x) + + +def softplus(x): + """ + Computes softplus: log(exp(features) + 1). + + Parameters + ---------- + x : tensor + Must be one of the following types: half, bfloat16, float32, float64. + + Returns + ------- + A Tensor. Has the same type as features. + """ + + return tf.nn.softplus(x) + + +class Tanh(object): + + def __init__(self): + pass + + def __call__(self, x): + return tf.nn.tanh(x) + + +def tanh(x): + """ + Computes hyperbolic tangent of x element-wise. + + Parameters + ---------- + x : tensor + Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128. + + Returns + ------- + A Tensor. Has the same type as x. + """ + + return tf.nn.tanh(x) + + +class Sigmoid(object): + + def __init__(self): + pass + + def __call__(self, x): + return tf.nn.sigmoid(x) + + +def sigmoid(x): + """ + Computes sigmoid of x element-wise. + + Parameters + ---------- + x : tensor + A Tensor with type float16, float32, float64, complex64, or complex128. + + Returns + ------- + A Tensor with the same type as x. + """ + + return tf.nn.sigmoid(x) + + +class Softmax(object): + + def __init__(self): + pass + + def __call__(self, x): + return tf.nn.softmax(x) + + +def softmax(logits, axis=None): + """ + Computes softmax activations. + + Parameters + ---------- + logits : tensor + Must be one of the following types: half, float32, float64. + axis : int + The dimension softmax would be performed on. The default is -1 which indicates the last dimension. + + Returns + ------- + A Tensor. Has the same type and shape as logits. + """ + + return tf.nn.softmax(logits, axis) + + +class Dropout(object): + + def __init__(self, keep, seed=0): + self.keep = keep + self.seed = seed + + def __call__(self, inputs, *args, **kwargs): + outputs = tf.nn.dropout(inputs, rate=1 - (self.keep), seed=self.seed) + return outputs + + +class BiasAdd(object): + """ + Adds bias to value. + + Parameters + ---------- + x : tensor + A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128. + bias : tensor + Must be the same type as value unless value is a quantized type, + in which case a different quantized type may be used. + Returns + ------- + A Tensor with the same type as value. + """ + + def __init__(self, data_format=None): + self.data_format = data_format + + def __call__(self, x, bias): + return tf.nn.bias_add(x, bias, data_format=self.data_format) + + +def bias_add(x, bias, data_format=None, name=None): + """ + Adds bias to value. + + Parameters + ---------- + x : tensor + A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128. + bias : tensor + Must be the same type as value unless value is a quantized type, + in which case a different quantized type may be used. + data_format : A string. + 'N...C' and 'NC...' are supported. + name : str + A name for the operation (optional). + Returns + ------- + A Tensor with the same type as value. 
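+
+    Examples
+    --------
+    A minimal sketch (shapes and values are assumptions):
+
+    >>> import tensorflow as tf
+    >>> x = tf.ones([1, 4, 4, 3])
+    >>> b = tf.constant([0.1, 0.2, 0.3])
+    >>> y = bias_add(x, b)  # channels-last by default; b is added along the channel axis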
+    """
+
+    x = tf.nn.bias_add(x, bias, data_format=data_format, name=name)
+    return x
+
+
+class Conv1D(object):
+
+    def __init__(self, stride, padding, data_format='NWC', dilations=None, out_channel=None, k_size=None):
+        self.stride = stride
+        self.dilations = dilations
+        self.data_format, self.padding = preprocess_1d_format(data_format, padding)
+
+    def __call__(self, input, filters):
+        outputs = tf.nn.conv1d(
+            input=input,
+            filters=filters,
+            stride=self.stride,
+            padding=self.padding,
+            data_format=self.data_format,
+            dilations=self.dilations,
+            # name=name
+        )
+        return outputs
+
+
+def conv1d(input, filters, stride, padding, data_format='NWC', dilations=None):
+    """
+    Computes a 1-D convolution given 3-D input and filter tensors.
+
+    Parameters
+    ----------
+    input : tensor
+        A 3D Tensor. Must be of type float16, float32, or float64.
+    filters : tensor
+        A 3D Tensor. Must have the same type as input.
+    stride : int or list
+        An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step.
+    padding : string
+        'SAME' or 'VALID'
+    data_format : string
+        An optional string from "NWC", "NCW". Defaults to "NWC", the data is stored in the order of
+        [batch, in_width, in_channels]. The "NCW" format stores data as [batch, in_channels, in_width].
+    dilations : int or list
+        An int or list of ints that has length 1 or 3 which defaults to 1.
+        The dilation factor for each dimension of input. If set to k > 1,
+        there will be k-1 skipped cells between each filter element on that dimension.
+        Dilations in the batch and depth dimensions must be 1.
+    name : string
+        A name for the operation (optional).
+
+    Returns
+    -------
+    A Tensor. Has the same type as input.
+    """
+
+    data_format, padding = preprocess_1d_format(data_format, padding)
+    outputs = tf.nn.conv1d(
+        input=input,
+        filters=filters,
+        stride=stride,
+        padding=padding,
+        data_format=data_format,
+        dilations=dilations,
+        # name=name
+    )
+    return outputs
+
+
+class Conv2D(object):
+
+    def __init__(self, strides, padding, data_format='NHWC', dilations=None, out_channel=None, k_size=None):
+        self.strides = strides
+        self.dilations = dilations
+        self.data_format, self.padding = preprocess_2d_format(data_format, padding)
+
+    def __call__(self, input, filters):
+        outputs = tf.nn.conv2d(
+            input=input,
+            filters=filters,
+            strides=self.strides,
+            padding=self.padding,
+            data_format=self.data_format,
+            dilations=self.dilations,
+        )
+        return outputs
+
+
+def conv2d(input, filters, strides, padding, data_format='NHWC', dilations=None):
+    """
+    Computes a 2-D convolution given 4-D input and filters tensors.
+
+    Parameters
+    ----------
+    input : tensor
+        Must be one of the following types: half, bfloat16, float32, float64. A 4-D tensor.
+        The dimension order is interpreted according to the value of data_format, see below for details.
+    filters : tensor
+        Must have the same type as input. A 4-D tensor of shape [filter_height, filter_width, in_channels, out_channels]
+    strides : int or list
+        The stride of the sliding window for each dimension of input. If a single value is given it is replicated in the H and W dimension.
+        By default the N and C dimensions are set to 1. The dimension order is determined by the value of data_format, see below for details.
+    padding : string
+        "SAME" or "VALID"
+    data_format : string
+        "NHWC", "NCHW". Defaults to "NHWC".
+    dilations : list of ints
+        list of ints that has length 1, 2 or 4, defaults to 1. The dilation factor for each dimension of input.
+ name : string + A name for the operation (optional). + + Returns + ------- + A Tensor. Has the same type as input. + """ + + data_format, padding = preprocess_2d_format(data_format, padding) + outputs = tf.nn.conv2d( + input=input, + filters=filters, + strides=strides, + padding=padding, + data_format=data_format, + dilations=dilations, + ) + return outputs + + +class Conv3D(object): + + def __init__(self, strides, padding, data_format='NDHWC', dilations=None, out_channel=None, k_size=None): + self.strides = strides + self.dilations = dilations + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + + def __call__(self, input, filters): + outputs = tf.nn.conv3d( + input=input, + filters=filters, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + dilations=self.dilations, + ) + return outputs + + +def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None): + """ + Computes a 3-D convolution given 5-D input and filters tensors. + + Parameters + ---------- + input : tensor + Must be one of the following types: half, bfloat16, float32, float64. + Shape [batch, in_depth, in_height, in_width, in_channels]. + filters : tensor + Must have the same type as input. Shape [filter_depth, filter_height, filter_width, in_channels, out_channels]. + in_channels must match between input and filters. + strides : list of ints + A list of ints that has length >= 5. 1-D tensor of length 5. + The stride of the sliding window for each dimension of input. + Must have strides[0] = strides[4] = 1. + padding : string + A string from: "SAME", "VALID". The type of padding algorithm to use. + data_format : string + An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data. + With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. + Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. + dilations : list of ints + Defaults to [1, 1, 1, 1, 1]. 1-D tensor of length 5. The dilation factor for each dimension of input. + If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. + The dimension order is determined by the value of data_format, see above for details. + Dilations in the batch and depth dimensions must be 1. + name : string + A name for the operation (optional). + + Returns + ------- + A Tensor. Has the same type as input. + """ + + data_format, padding = preprocess_3d_format(data_format, padding) + outputs = tf.nn.conv3d( + input=input, + filters=filters, + strides=strides, + padding=padding, + data_format=data_format, # 'NDHWC', + dilations=dilations, # [1, 1, 1, 1, 1], + # name=name, + ) + return outputs + + +def lrn(inputs, depth_radius, bias, alpha, beta): + """ + Local Response Normalization. + + Parameters + ---------- + inputs : tensor + Must be one of the following types: half, bfloat16, float32. 4-D. + depth_radius : int + Defaults to 5. 0-D. Half-width of the 1-D normalization window. + bias : float + Defaults to 1. An offset (usually positive to avoid dividing by 0). + alpha : float + Defaults to 1. A scale factor, usually positive. + beta : float + Defaults to 0.5. An exponent. + + Returns + ------- + A Tensor. Has the same type as input. 
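+
+    Examples
+    --------
+    A minimal sketch with the commonly used AlexNet-style parameters (shapes and values are illustrative only):
+
+    >>> import tensorflow as tf
+    >>> feature_map = tf.random.normal([1, 8, 8, 16])  # [batch, height, width, channels]
+    >>> normalized = lrn(feature_map, depth_radius=5, bias=1.0, alpha=1e-4, beta=0.75)
+    >>> print(normalized.shape)
+    (1, 8, 8, 16)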
+ """ + + outputs = tf.nn.lrn(inputs, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + return outputs + + +def moments(x, axes, shift=None, keepdims=False): + """ + Calculates the mean and variance of x. + + Parameters + ---------- + x : tensor + A Tensor + axes : ints + Axes along which to compute mean and variance. + shift : int + Not used in the current implementation. + keepdims : bool + produce moments with the same dimensionality as the input. + + Returns + ------- + Two Tensor objects: mean and variance. + """ + + outputs = tf.nn.moments(x, axes, shift, keepdims) + return outputs + + +class MaxPool(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.ksize = ksize + self.strides = strides + self.data_format = data_format + self.padding = padding + + def __call__(self, inputs): + if inputs.ndim == 3: + self.data_format, self.padding = preprocess_1d_format(data_format=self.data_format, padding=self.padding) + elif inputs.ndim == 4: + self.data_format, self.padding = preprocess_2d_format(data_format=self.data_format, padding=self.padding) + elif inputs.ndim == 5: + self.data_format, self.padding = preprocess_3d_format(data_format=self.data_format, padding=self.padding) + + outputs = tf.nn.max_pool( + input=inputs, ksize=self.ksize, strides=self.strides, padding=self.padding, data_format=self.data_format + ) + return outputs + + +def max_pool(input, ksize, strides, padding, data_format=None): + """ + Performs the max pooling on the input. + + Parameters + ---------- + input : tensor + Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] if data_format does not start + with "NC" (default), or [batch_size, num_channels] + input_spatial_shape if data_format starts with "NC". + Pooling happens over the spatial dimensions only. + ksize : int or list of ints + An int or list of ints that has length 1, N or N+2. + The size of the window for each dimension of the input tensor. + strides : int or list of ints + An int or list of ints that has length 1, N or N+2. + The stride of the sliding window for each dimension of the input tensor. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + name : string + A name for the operation (optional). + + Returns + ------- + A Tensor of format specified by data_format. The max pooled output tensor. + """ + + if input.ndim == 3: + data_format, padding = preprocess_1d_format(data_format=data_format, padding=padding) + elif input.ndim == 4: + data_format, padding = preprocess_2d_format(data_format=data_format, padding=padding) + elif input.ndim == 5: + data_format, padding = preprocess_3d_format(data_format=data_format, padding=padding) + + outputs = tf.nn.max_pool(input=input, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + return outputs + + +class AvgPool(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.ksize = ksize + self.strides = strides + self.data_format = data_format + self.padding = padding_format(padding) + + def __call__(self, inputs): + outputs = tf.nn.avg_pool( + input=inputs, ksize=self.ksize, strides=self.strides, padding=self.padding, data_format=self.data_format + ) + return outputs + + +def avg_pool(input, ksize, strides, padding): + """ + Performs the avg pooling on the input. 
+ + Parameters + ---------- + input : tensor + Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] + if data_format does not start with "NC" (default), or [batch_size, num_channels] + input_spatial_shape + if data_format starts with "NC". Pooling happens over the spatial dimensions only. + ksize : int or list of ints + An int or list of ints that has length 1, N or N+2. + The size of the window for each dimension of the input tensor. + strides : int or list of ints + An int or list of ints that has length 1, N or N+2. + The stride of the sliding window for each dimension of the input tensor. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + name : string + Optional name for the operation. + + Returns + ------- + A Tensor of format specified by data_format. The average pooled output tensor. + """ + + padding = padding_format(padding) + outputs = tf.nn.avg_pool( + input=input, + ksize=ksize, + strides=strides, + padding=padding, + ) + return outputs + + +def max_pool3d(input, ksize, strides, padding, data_format=None): + """ + Performs the max pooling on the input. + + Parameters + ---------- + input : tensor + A 5-D Tensor of the format specified by data_format. + ksize : int or list of ints + An int or list of ints that has length 1, 3 or 5. + The size of the window for each dimension of the input tensor. + strides : int or list of ints + An int or list of ints that has length 1, 3 or 5. + The stride of the sliding window for each dimension of the input tensor. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + data_format : string + "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data. + With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. + Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. + name : string + A name for the operation (optional). + + Returns + ------- + A Tensor of format specified by data_format. The max pooled output tensor. + """ + + data_format, padding = preprocess_3d_format(data_format, padding) + outputs = tf.nn.max_pool3d( + input=input, + ksize=ksize, + strides=strides, + padding=padding, + data_format=data_format, + ) + return outputs + + +def avg_pool3d(input, ksize, strides, padding, data_format=None): + """ + Performs the average pooling on the input. + + Parameters + ---------- + input : tensor + A 5-D Tensor of shape [batch, height, width, channels] and type float32, float64, qint8, quint8, or qint32. + ksize : int or list of ints + An int or list of ints that has length 1, 3 or 5. The size of the window for each dimension of the input tensor. + strides : int or list of ints + An int or list of ints that has length 1, 3 or 5. + The stride of the sliding window for each dimension of the input tensor. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + data_format : string + 'NDHWC' and 'NCDHW' are supported. + name : string + Optional name for the operation. + + Returns + ------- + A Tensor with the same type as value. The average pooled output tensor. 
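+
+    Examples
+    --------
+    A minimal sketch (shapes and values are illustrative only):
+
+    >>> import tensorflow as tf
+    >>> volume = tf.random.normal([1, 4, 8, 8, 3])  # [batch, depth, height, width, channels]
+    >>> pooled = avg_pool3d(volume, ksize=2, strides=2, padding='VALID', data_format='NDHWC')
+    >>> print(pooled.shape)
+    (1, 2, 4, 4, 3)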
+ """ + + data_format, padding = preprocess_3d_format(data_format, padding) + outputs = tf.nn.avg_pool3d( + input=input, + ksize=ksize, + strides=strides, + padding=padding, + data_format=data_format, + ) + return outputs + + +def pool(input, window_shape, pooling_type, strides=None, padding='VALID', data_format=None, dilations=None, name=None): + """ + Performs an N-D pooling operation. + + Parameters + ---------- + input : tensor + Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] + if data_format does not start with "NC" (default), or [batch_size, num_channels] + input_spatial_shape + if data_format starts with "NC". Pooling happens over the spatial dimensions only. + window_shape : int + Sequence of N ints >= 1. + pooling_type : string + Specifies pooling operation, must be "AVG" or "MAX". + strides : ints + Sequence of N ints >= 1. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1. + padding : string + The padding algorithm, must be "SAME" or "VALID". Defaults to "SAME". + See the "returns" section of tf.ops.convolution for details. + data_format : string + Specifies whether the channel dimension of the input and output is the last dimension (default, or if data_format does not start with "NC"), + or the second dimension (if data_format starts with "NC"). + For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". + For N=3, the valid values are "NDHWC" (default) and "NCDHW". + dilations : list of ints + Dilation rate. List of N ints >= 1. Defaults to [1]*N. If any value of dilation_rate is > 1, then all values of strides must be 1. + name : string + Optional. Name of the op. + + Returns + ------- + Tensor of rank N+2, of shape [batch_size] + output_spatial_shape + [num_channels] + """ + if pooling_type in ["MAX", "max"]: + pooling_type = "MAX" + elif pooling_type in ["AVG", "avg"]: + pooling_type = "AVG" + else: + raise ValueError('Unsupported pool_mode: ' + str(pooling_type)) + padding = padding_format(padding) + outputs = tf.nn.pool( + input=input, + window_shape=window_shape, + pooling_type=pooling_type, + strides=strides, + padding=padding, + data_format=data_format, + dilations=dilations, + name=name, + ) + return outputs + + +class DepthwiseConv2d(object): + + def __init__(self, strides, padding, data_format=None, dilations=None, ksize=None, channel_multiplier=1): + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + self.strides = strides + self.dilations = dilations + + def __call__(self, input, filter): + outputs = tf.nn.depthwise_conv2d( + input=input, + filter=filter, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + dilations=self.dilations, + ) + return outputs + + +def depthwise_conv2d(input, filter, strides, padding, data_format=None, dilations=None, name=None): + """ + Depthwise 2-D convolution. + + Parameters + ---------- + input : tensor + 4-D with shape according to data_format. + filter : tensor + 4-D with shape [filter_height, filter_width, in_channels, channel_multiplier]. + strides : list + 1-D of size 4. The stride of the sliding window for each dimension of input. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + data_format : string + The data format for input. Either "NHWC" (default) or "NCHW". + dilations : list + 1-D of size 2. 
The dilation rate in which we sample input values across the height and width dimensions in atrous convolution.
+        If it is greater than 1, then all values of strides must be 1.
+    name : string
+        A name for this operation (optional).
+
+    Returns
+    -------
+    A 4-D Tensor with shape according to data_format.
+    E.g., for "NHWC" format, shape is [batch, out_height, out_width, in_channels * channel_multiplier].
+    """
+
+    data_format, padding = preprocess_2d_format(data_format, padding)
+    outputs = tf.nn.depthwise_conv2d(
+        input=input,
+        filter=filter,
+        strides=strides,
+        padding=padding,
+        data_format=data_format,
+        dilations=dilations,
+        name=name,
+    )
+    return outputs
+
+
+def conv1d_transpose(
+    input, filters, output_shape, strides, padding='SAME', data_format='NWC', dilations=None, name=None
+):
+    """
+    The transpose of conv1d.
+
+    Parameters
+    ----------
+    input : tensor
+        A 3-D Tensor of type float and shape [batch, in_width, in_channels]
+        for NWC data format or [batch, in_channels, in_width] for NCW data format.
+    filters : tensor
+        A 3-D Tensor with the same type as input and shape [filter_width, output_channels, in_channels].
+        The filters' in_channels dimension must match that of input.
+    output_shape : tensor
+        A 1-D Tensor, containing three elements, representing the output shape of the deconvolution op.
+    strides : int or list of ints
+        An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step.
+    padding : string
+        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
+    data_format : string
+        'NWC' and 'NCW' are supported.
+    dilations : int or list of ints
+        An int or list of ints that has length 1 or 3 which defaults to 1.
+        The dilation factor for each dimension of input. If set to k > 1,
+        there will be k-1 skipped cells between each filter element on that dimension.
+        Dilations in the batch and depth dimensions must be 1.
+    name : string
+        Optional name for the returned tensor.
+
+    Returns
+    -------
+    A Tensor with the same type as input.
+    """
+
+    data_format, padding = preprocess_1d_format(data_format, padding)
+    outputs = tf.nn.conv1d_transpose(
+        input=input,
+        filters=filters,
+        output_shape=output_shape,
+        strides=strides,
+        padding=padding,
+        data_format=data_format,
+        dilations=dilations,
+        name=name,
+    )
+    return outputs
+
+
+def conv2d_transpose(
+    input, filters, output_shape, strides, padding='SAME', data_format='NHWC', dilations=None, name=None
+):
+    """
+    The transpose of conv2d.
+
+    Parameters
+    ----------
+    input : tensor
+        A 4-D Tensor of type float and shape [batch, height, width, in_channels]
+        for NHWC data format or [batch, in_channels, height, width] for NCHW data format.
+    filters : tensor
+        A 4-D Tensor with the same type as input and shape [height, width,
+        output_channels, in_channels]. The filters' in_channels dimension must match that of input.
+    output_shape : tensor
+        A 1-D Tensor representing the output shape of the deconvolution op.
+    strides : int or list of ints
+        An int or list of ints that has length 1, 2 or 4. The stride of the sliding window for each dimension of input.
+        If a single value is given it is replicated in the H and W dimension.
+        By default the N and C dimensions are set to 1.
+        The dimension order is determined by the value of data_format, see below for details.
+    padding : string
+        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
+    data_format : string
+        'NHWC' and 'NCHW' are supported.
+    dilations : int or list of ints
+        An int or list of ints that has length 1, 2 or 4, defaults to 1.
+    name : string
+        Optional name for the returned tensor.
+
+    Returns
+    -------
+    A Tensor with the same type as input.
+    """
+
+    data_format, padding = preprocess_2d_format(data_format, padding)
+    outputs = tf.nn.conv2d_transpose(
+        input=input,
+        filters=filters,
+        output_shape=output_shape,
+        strides=strides,
+        padding=padding,
+        data_format=data_format,
+        dilations=dilations,
+        name=name,
+    )
+    return outputs
+
+
+def conv3d_transpose(
+    input, filters, output_shape, strides, padding='SAME', data_format='NDHWC', dilations=None, name=None
+):
+    """
+    The transpose of conv3d.
+
+    Parameters
+    ----------
+    input : tensor
+        A 5-D Tensor of type float and shape [batch, in_depth, in_height, in_width, in_channels] for
+        NDHWC data format or [batch, in_channels, in_depth, in_height, in_width] for NCDHW data format.
+    filters : tensor
+        A 5-D Tensor with the same type as input and shape [filter_depth, filter_height, filter_width, output_channels, in_channels].
+        The filters' in_channels dimension must match that of input.
+    output_shape : tensor
+        A 1-D Tensor representing the output shape of the deconvolution op.
+    strides : int or list of ints
+        An int or list of ints that has length 1, 3 or 5.
+    padding : string
+        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
+    data_format : string
+        'NDHWC' and 'NCDHW' are supported.
+    dilations : int or list of ints
+        An int or list of ints that has length 1, 3 or 5, defaults to 1.
+    name : string
+        Optional name for the returned tensor.
+
+    Returns
+    -------
+    A Tensor with the same type as input.
+    """
+
+    data_format, padding = preprocess_3d_format(data_format, padding)
+    outputs = tf.nn.conv3d_transpose(
+        input=input, filters=filters, output_shape=output_shape, strides=strides, padding=padding,
+        data_format=data_format, dilations=dilations, name=name
+    )
+    return outputs
+
+
+def depthwise_conv2d(input, filters, strides, padding='SAME', data_format='NHWC', dilations=None, name=None):
+    """
+    Depthwise 2-D convolution.
+
+    Parameters
+    ----------
+    input : tensor
+        4-D with shape according to data_format.
+    filters : tensor
+        4-D with shape [filter_height, filter_width, in_channels, channel_multiplier].
+    strides : tuple
+        1-D of size 4. The stride of the sliding window for each dimension of input.
+    padding : string
+        'VALID' or 'SAME'
+    data_format : string
+        "NHWC" (default) or "NCHW".
+    dilations : tuple
+        The dilation rate in which we sample input values across the height and width dimensions in atrous convolution.
+        If it is greater than 1, then all values of strides must be 1.
+    name : string
+        A name for this operation (optional).
+
+    Returns
+    -------
+    A 4-D Tensor with shape according to data_format.
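+
+    Examples
+    --------
+    A minimal sketch (shapes and values are illustrative only):
+
+    >>> import tensorflow as tf
+    >>> img = tf.random.normal([1, 8, 8, 3])  # [batch, height, width, in_channels]
+    >>> dw_filter = tf.random.normal([3, 3, 3, 2])  # channel_multiplier = 2
+    >>> out = depthwise_conv2d(img, dw_filter, strides=[1, 1, 1, 1], padding='SAME')
+    >>> print(out.shape)  # in_channels * channel_multiplier = 6
+    (1, 8, 8, 6)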
+    """
+
+    data_format, padding = preprocess_2d_format(data_format, padding)
+    outputs = tf.nn.depthwise_conv2d(
+        input=input,
+        filter=filters,
+        strides=strides,
+        padding=padding,
+        data_format=data_format,
+        dilations=dilations,
+        name=name,
+    )
+    return outputs
+
+
+def _to_channel_first_bias(b):
+    """Reshape [c] to [c, 1, 1]."""
+    channel_size = int(b.shape[0])
+    new_shape = (channel_size, 1, 1)
+    return tf.reshape(b, new_shape)
+
+
+def _bias_scale(x, b, data_format):
+    """The multiplication counterpart of tf.nn.bias_add."""
+    if data_format == 'NHWC':
+        return x * b
+    elif data_format == 'NCHW':
+        return x * _to_channel_first_bias(b)
+    else:
+        raise ValueError('invalid data_format: %s' % data_format)
+
+
+def _bias_add(x, b, data_format):
+    """Alternative implementation of tf.nn.bias_add which is compatible with TensorRT."""
+    if data_format == 'NHWC':
+        return tf.add(x, b)
+    elif data_format == 'NCHW':
+        return tf.add(x, _to_channel_first_bias(b))
+    else:
+        raise ValueError('invalid data_format: %s' % data_format)
+
+
+def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, data_format, name=None):
+    """Data format aware version of tf.nn.batch_normalization."""
+    if data_format == 'channels_last':
+        mean = tf.reshape(mean, [1] * (len(x.shape) - 1) + [-1])
+        variance = tf.reshape(variance, [1] * (len(x.shape) - 1) + [-1])
+        offset = tf.reshape(offset, [1] * (len(x.shape) - 1) + [-1])
+        scale = tf.reshape(scale, [1] * (len(x.shape) - 1) + [-1])
+    elif data_format == 'channels_first':
+        mean = tf.reshape(mean, [1] + [-1] + [1] * (len(x.shape) - 2))
+        variance = tf.reshape(variance, [1] + [-1] + [1] * (len(x.shape) - 2))
+        offset = tf.reshape(offset, [1] + [-1] + [1] * (len(x.shape) - 2))
+        scale = tf.reshape(scale, [1] + [-1] + [1] * (len(x.shape) - 2))
+    else:
+        raise ValueError('invalid data_format: %s' % data_format)
+
+    with ops.name_scope(name, 'batchnorm', [x, mean, variance, scale, offset]):
+        inv = math_ops.rsqrt(variance + variance_epsilon)
+        if scale is not None:
+            inv *= scale
+
+        a = math_ops.cast(inv, x.dtype)
+        b = math_ops.cast(offset - mean * inv if offset is not None else -mean * inv, x.dtype)
+        # Return a * x + b with customized data_format.
+        # Currently TF doesn't have bias_scale, and TensorRT has a bug in converting tf.nn.bias_add,
+        # so we reimplemented them to make the model work with TensorRT.
+        # See https://github.com/tensorlayer/openpose-plus/issues/75 for more details.
+        # df = {'channels_first': 'NCHW', 'channels_last': 'NHWC'}
+        # return _bias_add(_bias_scale(x, a, df[data_format]), b, df[data_format])
+        return a * x + b
+
+
+class BatchNorm(object):
+    """
+    The :class:`BatchNorm` is a batch normalization layer for both fully-connected and convolution outputs.
+    See ``tf.nn.batch_normalization`` and ``tf.nn.moments``.
+
+    Parameters
+    ----------
+    decay : float
+        A decay factor for `ExponentialMovingAverage`.
+        A large value is suggested for a large dataset.
+    epsilon : float
+        Epsilon.
+    act : activation function
+        The activation function of this layer.
+    is_train : boolean
+        Is being used for training or inference.
+    beta_init : initializer or None
+        The initializer for initializing beta, if None, skip beta.
+        Usually you should not skip beta unless you know what you are doing.
+    gamma_init : initializer or None
+        The initializer for initializing gamma, if None, skip gamma.
+        When the batch normalization layer is used instead of 'biases', or the next layer is linear, this can be
+        disabled since the scaling can be done by the next layer. see `Inception-ResNet-v2 `__
+    moving_mean_init : initializer or None
+        The initializer for initializing moving mean, if None, skip moving mean.
+    moving_var_init : initializer or None
+        The initializer for initializing moving var, if None, skip moving var.
+    num_features: int
+        Number of features for input tensor. Useful to build layer if using BatchNorm1d, BatchNorm2d or BatchNorm3d,
+        but should be left as None if using BatchNorm. Default None.
+    data_format : str
+        'channels_last' (default) or 'channels_first'.
+    name : None or str
+        A unique layer name.
+
+    Examples
+    ---------
+    With TensorLayer
+
+    >>> net = tl.layers.Input([None, 50, 50, 32], name='input')
+    >>> net = tl.layers.BatchNorm()(net)
+
+    Notes
+    -----
+    The :class:`BatchNorm` is universally suitable for 3D/4D/5D input in static models, but should not be used
+    in dynamic models where layers are built upon class initialization. So the argument 'num_features' should only be used
+    for the subclasses :class:`BatchNorm1d`, :class:`BatchNorm2d` and :class:`BatchNorm3d`. All three subclasses are
+    suitable under all kinds of conditions.
+
+    References
+    ----------
+    - `Source `__
+    - `stackoverflow `__
+
+    """
+
+    def __init__(
+        self, decay=0.9, epsilon=0.00001, beta=None, gamma=None, moving_mean=None, moving_var=None, num_features=None,
+        data_format='channels_last', is_train=False
+    ):
+        self.decay = decay
+        self.epsilon = epsilon
+        self.data_format = data_format
+        self.beta = beta
+        self.gamma = gamma
+        self.moving_mean = moving_mean
+        self.moving_var = moving_var
+        self.num_features = num_features
+        self.is_train = is_train
+        self.axes = None
+
+        if self.decay < 0.0 or 1.0 < self.decay:
+            raise ValueError("decay should be between 0 and 1")
+
+    def _get_param_shape(self, inputs_shape):
+        if self.data_format == 'channels_last':
+            axis = -1
+        elif self.data_format == 'channels_first':
+            axis = 1
+        else:
+            raise ValueError('data_format should be either %s or %s' % ('channels_last', 'channels_first'))
+
+        channels = inputs_shape[axis]
+        params_shape = [channels]
+
+        return params_shape
+
+    def _check_input_shape(self, inputs):
+        if inputs.ndim <= 1:
+            raise ValueError('expected input at least 2D, but got {}D input'.format(inputs.ndim))
+
+    def __call__(self, inputs):
+        self._check_input_shape(inputs)
+        self.channel_axis = len(inputs.shape) - 1 if self.data_format == 'channels_last' else 1
+        if self.axes is None:
+            self.axes = [i for i in range(len(inputs.shape)) if i != self.channel_axis]
+
+        mean, var = tf.nn.moments(inputs, self.axes, keepdims=False)
+        if self.is_train:
+            # update moving_mean and moving_var
+            self.moving_mean = moving_averages.assign_moving_average(
+                self.moving_mean, mean, self.decay, zero_debias=False
+            )
+            self.moving_var = moving_averages.assign_moving_average(self.moving_var, var, self.decay, zero_debias=False)
+            outputs = batch_normalization(inputs, mean, var, self.beta, self.gamma, self.epsilon, self.data_format)
+        else:
+            outputs = batch_normalization(
+                inputs, self.moving_mean, self.moving_var, self.beta, self.gamma, self.epsilon, self.data_format
+            )
+
+        return outputs
diff --git a/tensorlayer/cost/__init__.py b/tensorlayer/cost/__init__.py
new file mode 100644
index 000000000..eb18aae26
--- /dev/null
+++ b/tensorlayer/cost/__init__.py
@@ -0,0 +1,13 @@
+#!
/usr/bin/python
+# -*- coding: utf-8 -*-
+
+from tensorlayer.backend import BACKEND
+
+if BACKEND == 'tensorflow':
+    from .tensorflow_cost import *
+elif BACKEND == 'mindspore':
+    from .mindspore_cost import *
+elif BACKEND == 'dragon':
+    pass
+else:
+    raise NotImplementedError("This backend is not supported")
diff --git a/tensorlayer/cost/mindspore_cost.py b/tensorlayer/cost/mindspore_cost.py
new file mode 100644
index 000000000..694c5fc83
--- /dev/null
+++ b/tensorlayer/cost/mindspore_cost.py
@@ -0,0 +1,763 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+from mindspore import nn
+from mindspore.nn import Cell
+import mindspore.ops as P
+
+__all__ = [
+    'cross_entropy',
+    'sigmoid_cross_entropy',
+    'binary_cross_entropy',
+    'mean_squared_error',
+    'normalized_mean_square_error',
+    'absolute_difference_error',
+    'dice_coe',
+    'dice_hard_coe',
+    'iou_coe',
+    'cross_entropy_seq',
+    'cross_entropy_seq_with_mask',
+    'cosine_similarity',
+    'li_regularizer',
+    'lo_regularizer',
+    'maxnorm_regularizer',
+    'maxnorm_o_regularizer',
+    'maxnorm_i_regularizer',
+]
+
+cross_entropy = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
+
+
+def sigmoid_cross_entropy(output, target, name=None):
+    """Sigmoid cross-entropy operation, see ``tf.ops.sigmoid_cross_entropy_with_logits``.
+
+    Parameters
+    ----------
+    output : Tensor
+        A batch of distribution with shape: [batch_size, num of classes].
+    target : Tensor
+        A batch of index with shape: [batch_size, ].
+    name : string
+        Name of this loss.
+
+    """
+    # NOTE: this currently reuses the module-level softmax cross-entropy defined above.
+    # ``P.ReduceMean`` is a primitive class, so it must be instantiated before being applied.
+    outputs = P.ReduceMean()(cross_entropy(output, target))
+    return outputs
+
+
+def binary_cross_entropy(output, target, epsilon=1e-8, name='bce_loss'):
+    """Binary cross entropy operation.
+
+    Parameters
+    ----------
+    output : Tensor
+        Tensor with type of `float32` or `float64`.
+    target : Tensor
+        The target distribution, format the same with `output`.
+    epsilon : float
+        A small value to avoid the output being zero.
+    name : str
+        An optional name to attach to this function.
+
+    References
+    -----------
+    - `ericjang-DRAW `__
+
+    """
+
+    # return tf.reduce_mean(
+    #     tf.reduce_sum(
+    #         -(target * tf.math.log(output + epsilon) + (1. - target) * tf.math.log(1. - output + epsilon)), axis=1
+    #     ), name=name
+    # )
+    raise NotImplementedError("Not Implemented.")
+
+
+def mean_squared_error(output, target, is_mean=False, axis=-1, name="mean_squared_error"):
+    """Return the mean-square-error (L2) of two batches of data.
+
+    Parameters
+    ----------
+    output : Tensor
+        2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].
+    target : Tensor
+        The target distribution, format the same with `output`.
+    is_mean : boolean
+        Whether to compute the mean or the sum for each example.
+        - If True, compute the mean of the losses for each example.
+        - If False, compute the sum (default).
+    axis : int or list of int
+        The dimensions to reduce.
+    name : str
+        An optional name to attach to this function.
+
+    References
+    ------------
+    - `Wiki Mean Squared Error `__
+
+    """
+    # with tf.name_scope(name):
+    #     if len(output.shape) == 2:  # [batch_size, n_feature]
+    #         axis = 1
+    #     elif len(output.shape) == 3:  # [batch_size, w, h]
+    #         axis = [1, 2]
+    #     elif len(output.shape) == 4:  # [batch_size, w, h, c]
+    #         axis = [1, 2, 3]
+    #     else:
+    #         raise Exception("Unknown dimension")
+
+    # NOTE: nn.MSELoss averages over all elements; ``is_mean`` and ``axis`` are currently ignored.
+    return nn.MSELoss()(output, target)
+
+
+def normalized_mean_square_error(output, target, axis=-1, name="normalized_mean_squared_error_loss"):
+    """Return the normalized mean-square-error of two distributions.
+
+    Parameters
+    ----------
+    output : Tensor
+        2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].
+    target : Tensor
+        The target distribution, format the same with `output`.
+    axis : int or list of int
+        The dimensions to reduce.
+    name : str
+        An optional name to attach to this function.
+
+    """
+    # with tf.name_scope("normalized_mean_squared_error_loss"):
+    #     nmse_a = tf.sqrt(tf.reduce_sum(tf.math.squared_difference(output, target), axis=axis))
+    #     nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=axis))
+    #     nmse = tf.reduce_mean(nmse_a / nmse_b, name=name)
+    raise NotImplementedError("Not Implemented.")
+
+
+def absolute_difference_error(output, target, is_mean=False, axis=-1, name="absolute_difference_error_loss"):
+    """Return the absolute-difference error (L1) of two batches of data.
+
+    Parameters
+    ----------
+    output : Tensor
+        2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].
+    target : Tensor
+        The target distribution, format the same with `output`.
+    is_mean : boolean
+        Whether to compute the mean or the sum for each example.
+        - If True, compute the mean of the losses for each example.
+        - If False, compute the sum (default).
+    axis : int or list of int
+        The dimensions to reduce.
+    name : str
+        An optional name to attach to this function.
+
+    """
+
+    # if is_mean:
+    #     loss = tf.reduce_mean(tf.reduce_mean(tf.abs(output - target), axis), name=name)
+    # else:
+    #     loss = tf.reduce_mean(tf.reduce_sum(tf.abs(output - target), axis), name=name)
+    raise NotImplementedError("Not Implemented.")
+
+
+def dice_coe(output, target, loss_type='jaccard', axis=(1, 2, 3), smooth=1e-5):
+    """Soft dice (Sørensen or Jaccard) coefficient for comparing the similarity
+    of two batches of data, usually used for binary image segmentation,
+    i.e. the labels are binary. The coefficient ranges from 0 to 1; 1 means a perfect match.
+
+    Parameters
+    -----------
+    output : Tensor
+        A distribution with shape: [batch_size, ....], (any dimensions).
+    target : Tensor
+        The target distribution, format the same with `output`.
+    loss_type : str
+        ``jaccard`` or ``sorensen``, default is ``jaccard``.
+    axis : tuple of int
+        All dimensions are reduced, default ``[1,2,3]``.
+    smooth : float
+        This small value will be added to the numerator and denominator.
+        - If both output and target are empty, it makes sure dice is 1.
+        - If either output or target is empty (all pixels are background), dice = ``smooth/(small_value + smooth)``; if smooth is very small, dice is close to 0 (even when the image values are below the threshold), so in this case a higher smooth can give a higher dice.
+
+    Examples
+    ---------
+    >>> import tensorlayer as tl
+    >>> outputs = tl.act.pixel_wise_softmax(outputs)
+    >>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_)
+
+    References
+    -----------
+    - `Wiki-Dice `__
+
+    """
+    # inse = tf.reduce_sum(output * target, axis=axis)
+    # if loss_type == 'jaccard':
+    #     l = tf.reduce_sum(output * output, axis=axis)
+    #     r = tf.reduce_sum(target * target, axis=axis)
+    # elif loss_type == 'sorensen':
+    #     l = tf.reduce_sum(output, axis=axis)
+    #     r = tf.reduce_sum(target, axis=axis)
+    # else:
+    #     raise Exception("Unknown loss_type")
+    # dice = (2. * inse + smooth) / (l + r + smooth)
+    # ##
+    # dice = tf.reduce_mean(dice, name='dice_coe')
+    raise NotImplementedError("Not Implemented.")
+
+
+def dice_hard_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
+    """Non-differentiable Sørensen–Dice coefficient for comparing the similarity
+    of two batches of data, usually used for binary image segmentation, i.e. the labels are binary.
+    The coefficient ranges from 0 to 1; 1 means a perfect match.
+
+    Parameters
+    -----------
+    output : tensor
+        A distribution with shape: [batch_size, ....], (any dimensions).
+    target : tensor
+        The target distribution, format the same with `output`.
+    threshold : float
+        The threshold value to be true.
+    axis : tuple of integer
+        All dimensions are reduced, default ``(1,2,3)``.
+    smooth : float
+        This small value will be added to the numerator and denominator, see ``dice_coe``.
+
+    References
+    -----------
+    - `Wiki-Dice `__
+
+    """
+    # output = tf.cast(output > threshold, dtype=tf.float32)
+    # target = tf.cast(target > threshold, dtype=tf.float32)
+    # inse = tf.reduce_sum(tf.multiply(output, target), axis=axis)
+    # l = tf.reduce_sum(output, axis=axis)
+    # r = tf.reduce_sum(target, axis=axis)
+    # hard_dice = (2. * inse + smooth) / (l + r + smooth)
+    # ##
+    # hard_dice = tf.reduce_mean(hard_dice, name='hard_dice')
+    raise NotImplementedError("Not Implemented.")
+
+
+def iou_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
+    """Non-differentiable Intersection over Union (IoU) for comparing the
+    similarity of two batches of data, usually used for evaluating binary image segmentation.
+    The coefficient ranges from 0 to 1; 1 means a perfect match.
+
+    Parameters
+    -----------
+    output : tensor
+        A batch of distribution with shape: [batch_size, ....], (any dimensions).
+    target : tensor
+        The target distribution, format the same with `output`.
+    threshold : float
+        The threshold value to be true.
+    axis : tuple of integer
+        All dimensions are reduced, default ``(1,2,3)``.
+    smooth : float
+        This small value will be added to the numerator and denominator, see ``dice_coe``.
+
+    Notes
+    ------
+    - IoU cannot be used as a training loss; people usually use the dice coefficient for training, and IoU and hard-dice for evaluation.
+
+    """
+    # pre = tf.cast(output > threshold, dtype=tf.float32)
+    # truth = tf.cast(target > threshold, dtype=tf.float32)
+    # inse = tf.reduce_sum(tf.multiply(pre, truth), axis=axis)  # AND
+    # union = tf.reduce_sum(tf.cast(tf.add(pre, truth) >= 1, dtype=tf.float32), axis=axis)  # OR
+    # batch_iou = (inse + smooth) / (union + smooth)
+    # iou = tf.reduce_mean(batch_iou, name='iou_coe')
+    raise NotImplementedError("Not Implemented.")
+
+
+def sequence_loss_by_example(
+    logits, targets, weights, average_across_timesteps=True, softmax_loss_function=None, name=None
+):
+    """Weighted cross-entropy loss for a sequence of logits (per example).
see original tensorflow code : + + + Parameters + ---------- + logits: List + List of 2D Tensors of shape [batch_size x num_decoder_symbols]. + targets: List + List of 1D batch-sized int32 Tensors of the same length as logits. + weights: List + List of 1D batch-sized float-Tensors of the same length as logits. + average_across_timesteps: Boolean + If set, divide the returned cost by the total label weight. + softmax_loss_function: None or Function + Function (labels, logits) -> loss-batch to be used instead of the standard softmax (the default if this is None). + **Note that to avoid confusion, it is required for the function to accept named arguments.** + name: None or str + Optional name for this operation, default: "sequence_loss_by_example". + + Returns + ------- + 1D batch-sized float Tensor: The log-perplexity for each sequence. + + Raises + ------ + ValueError: If len(logits) is different from len(targets) or len(weights). + + """ + # if len(targets) != len(logits) or len(weights) != len(logits): + # raise ValueError( + # "Lengths of logits, weights, and targets must be the same " + # "%d, %d, %d." % (len(logits), len(weights), len(targets)) + # ) + # with ops.name_scope(name, "sequence_loss_by_example", logits + targets + weights): + # log_perp_list = [] + # for logit, target, weight in zip(logits, targets, weights): + # if softmax_loss_function is None: + # # TODO(irving,ebrevdo): This reshape is needed because + # # sequence_loss_by_example is called with scalars sometimes, which + # # violates our general scalar strictness policy. + # target = array_ops.reshape(target, [-1]) + # crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(labels=target, logits=logit) + # else: + # crossent = softmax_loss_function(labels=target, logits=logit) + # log_perp_list.append(crossent * weight) + # log_perps = math_ops.add_n(log_perp_list) + # if average_across_timesteps: + # total_size = math_ops.add_n(weights) + # total_size += 1e-12 # Just to avoid division by 0 for all-0 weights. + # log_perps /= total_size + raise NotImplementedError("Not Implemented.") + + +def cross_entropy_seq(logits, target_seqs, batch_size=None): + """Returns the expression of cross-entropy of two sequences, implement + softmax internally. Normally be used for fixed length RNN outputs, see `PTB example `__. + + Parameters + ---------- + logits : Tensor + 2D tensor with shape of `[batch_size * n_steps, n_classes]`. + target_seqs : Tensor + The target sequence, 2D tensor `[batch_size, n_steps]`, if the number of step is dynamic, please use ``tl.cost.cross_entropy_seq_with_mask`` instead. + batch_size : None or int. + Whether to divide the cost by batch size. + - If integer, the return cost will be divided by `batch_size`. + - If None (default), the return cost will not be divided by anything. 
+ + Examples + -------- + >>> import tensorlayer as tl + >>> # see `PTB example `__.for more details + >>> # outputs shape : (batch_size * n_steps, n_classes) + >>> # targets shape : (batch_size, n_steps) + >>> cost = tl.cost.cross_entropy_seq(outputs, targets) + + """ + # sequence_loss_by_example_fn = sequence_loss_by_example + # + # loss = sequence_loss_by_example_fn( + # [logits], [tf.reshape(target_seqs, [-1])], [tf.ones_like(tf.reshape(target_seqs, [-1]), dtype=tf.float32)] + # ) + # # [tf.ones([batch_size * num_steps])]) + # cost = tf.reduce_sum(loss) # / batch_size + # if batch_size is not None: + # cost = cost / batch_size + raise NotImplementedError("Not Implemented.") + + +def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=False, name=None): + """Returns the expression of cross-entropy of two sequences, implement + softmax internally. Normally be used for Dynamic RNN with Synced sequence input and output. + + Parameters + ----------- + logits : Tensor + 2D tensor with shape of [batch_size * ?, n_classes], `?` means dynamic IDs for each example. + - Can be get from `DynamicRNNLayer` by setting ``return_seq_2d`` to `True`. + target_seqs : Tensor + int of tensor, like word ID. [batch_size, ?], `?` means dynamic IDs for each example. + input_mask : Tensor + The mask to compute loss, it has the same size with `target_seqs`, normally 0 or 1. + return_details : boolean + Whether to return detailed losses. + - If False (default), only returns the loss. + - If True, returns the loss, losses, weights and targets (see source code). + + Examples + -------- + >>> import tensorlayer as tl + >>> import tensorflow as tf + >>> import numpy as np + >>> batch_size = 64 + >>> vocab_size = 10000 + >>> embedding_size = 256 + >>> ni = tl.layers.Input([batch_size, None], dtype=tf.int64) + >>> net = tl.layers.Embedding( + ... vocabulary_size = vocab_size, + ... embedding_size = embedding_size, + ... name = 'seq_embedding')(ni) + >>> net = tl.layers.RNN( + ... cell =tf.keras.layers.LSTMCell(units=embedding_size, dropout=0.1), + ... return_seq_2d = True, + ... name = 'dynamicrnn')(net) + >>> net = tl.layers.Dense(n_units=vocab_size, name="output")(net) + >>> model = tl.models.Model(inputs=ni, outputs=net) + >>> input_seqs = np.random.randint(0, 10, size=(batch_size, 10), dtype=np.int64) + >>> target_seqs = np.random.randint(0, 10, size=(batch_size, 10), dtype=np.int64) + >>> input_mask = np.random.randint(0, 2, size=(batch_size, 10), dtype=np.int64) + >>> outputs = model(input_seqs, is_train=True) + >>> loss = tl.cost.cross_entropy_seq_with_mask(outputs, target_seqs, input_mask) + + """ + # targets = tf.reshape(target_seqs, [-1]) # to one vector + # weights = tf.cast(tf.reshape(input_mask, [-1]), dtype=tf.float32) # to one vector like targets + # losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name) * weights + # # losses = tf.reduce_mean(tf.ops.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name)) # for TF1.0 and others + # + # loss = tf.divide( + # tf.reduce_sum(losses), # loss from mask. reduce_sum before element-wise mul with mask !! + # tf.reduce_sum(weights), + # name="seq_loss_with_mask" + # ) + # + # if return_details: + # return loss, losses, weights, targets + # else: + # return loss + raise NotImplementedError("Not Implemented.") + + +def cosine_similarity(v1, v2): + """Cosine similarity [-1, 1]. + + Parameters + ---------- + v1, v2 : Tensor + Tensor with the same shape [batch_size, n_feature]. 
+ + References + ---------- + - `Wiki `__. + + """ + + # return tf.reduce_sum(tf.multiply(v1, v2), 1) / \ + # (tf.sqrt(tf.reduce_sum(tf.multiply(v1, v1), 1)) * + # tf.sqrt(tf.reduce_sum(tf.multiply(v2, v2), 1))) + raise NotImplementedError("Not Implemented.") + + +# Regularization Functions +def li_regularizer(scale, scope=None): + """Li regularization removes the neurons of previous layer. The `i` represents `inputs`. + Returns a function that can be used to apply group li regularization to weights. + The implementation follows `TensorFlow contrib `__. + + Parameters + ---------- + scale : float + A scalar multiplier `Tensor`. 0.0 disables the regularizer. + scope: str + An optional scope name for this function. + + Returns + -------- + A function with signature `li(weights, name=None)` that apply Li regularization. + + Raises + ------ + ValueError : if scale is outside of the range [0.0, 1.0] or if scale is not a float. + + """ + # if isinstance(scale, numbers.Integral): + # raise ValueError('scale cannot be an integer: %s' % scale) + # if isinstance(scale, numbers.Real): + # if scale < 0.: + # raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale) + # if scale >= 1.: + # raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % scale) + # if scale == 0.: + # logging.info('Scale of 0 disables regularizer.') + # return lambda _, name=None: None + # + # def li(weights): + # """Applies li regularization to weights.""" + # with tf.name_scope('li_regularizer') as scope: + # my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale') + # # if tf.__version__ <= '0.12': + # # standard_ops_fn = standard_ops.mul + # # else: + # standard_ops_fn = standard_ops.multiply + # return standard_ops_fn( + # my_scale, standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 1))), + # name=scope + # ) + + raise NotImplementedError("Not Implemented.") + + +def lo_regularizer(scale): + """Lo regularization removes the neurons of current layer. The `o` represents `outputs` + Returns a function that can be used to apply group lo regularization to weights. + The implementation follows `TensorFlow contrib `__. + + Parameters + ---------- + scale : float + A scalar multiplier `Tensor`. 0.0 disables the regularizer. + + Returns + ------- + A function with signature `lo(weights, name=None)` that apply Lo regularization. + + Raises + ------ + ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. 
+ + """ + # if isinstance(scale, numbers.Integral): + # raise ValueError('scale cannot be an integer: %s' % scale) + # + # if isinstance(scale, numbers.Real): + # if scale < 0.: + # raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale) + # if scale >= 1.: + # raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % scale) + # if scale == 0.: + # logging.info('Scale of 0 disables regularizer.') + # return lambda _, name=None: None + # + # def lo(weights, name='lo_regularizer'): + # """Applies group column regularization to weights.""" + # with tf.name_scope(name) as scope: + # my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale') + # # if tf.__version__ <= '0.12': + # # standard_ops_fn = standard_ops.mul + # # else: + # standard_ops_fn = standard_ops.multiply + # return standard_ops_fn( + # my_scale, standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 0))), + # name=scope + # ) + + raise NotImplementedError("Not Implemented.") + + +def maxnorm_regularizer(scale=1.0): + """Max-norm regularization returns a function that can be used to apply max-norm regularization to weights. + + More about max-norm, see `wiki-max norm `_. + The implementation follows `TensorFlow contrib `__. + + Parameters + ---------- + scale : float + A scalar multiplier `Tensor`. 0.0 disables the regularizer. + + Returns + --------- + A function with signature `mn(weights, name=None)` that apply Lo regularization. + + Raises + -------- + ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. + + """ + # if isinstance(scale, numbers.Integral): + # raise ValueError('scale cannot be an integer: %s' % scale) + # + # if isinstance(scale, numbers.Real): + # if scale < 0.: + # raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale) + # # if scale >= 1.: + # # raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % + # # scale) + # if scale == 0.: + # logging.info('Scale of 0 disables regularizer.') + # return lambda _, name=None: None + # + # def mn(weights, name='max_regularizer'): + # """Applies max-norm regularization to weights.""" + # with tf.name_scope(name) as scope: + # my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale') + # # if tf.__version__ <= '0.12': + # # standard_ops_fn = standard_ops.mul + # # else: + # standard_ops_fn = standard_ops.multiply + # return standard_ops_fn(my_scale, standard_ops.reduce_max(standard_ops.abs(weights)), name=scope) + # + # return mn + raise NotImplementedError("Not Implemented.") + + +def maxnorm_o_regularizer(scale): + """Max-norm output regularization removes the neurons of current layer. + Returns a function that can be used to apply max-norm regularization to each column of weight matrix. + The implementation follows `TensorFlow contrib `__. + + Parameters + ---------- + scale : float + A scalar multiplier `Tensor`. 0.0 disables the regularizer. + + Returns + --------- + A function with signature `mn_o(weights, name=None)` that apply Lo regularization. + + Raises + --------- + ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. 
+ + """ + # if isinstance(scale, numbers.Integral): + # raise ValueError('scale cannot be an integer: %s' % scale) + # + # if isinstance(scale, numbers.Real): + # if scale < 0.: + # raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale) + # # if scale >= 1.: + # # raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % + # # scale) + # if scale == 0.: + # logging.info('Scale of 0 disables regularizer.') + # return lambda _, name=None: None + # + # def mn_o(weights, name='maxnorm_o_regularizer'): + # """Applies max-norm regularization to weights.""" + # with tf.name_scope(name) as scope: + # my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale') + # if tf.__version__ <= '0.12': + # standard_ops_fn = standard_ops.mul + # else: + # standard_ops_fn = standard_ops.multiply + # return standard_ops_fn( + # my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 0)), name=scope + # ) + # + # return mn_o + raise NotImplementedError("Not Implemented.") + + +def maxnorm_i_regularizer(scale): + """Max-norm input regularization removes the neurons of previous layer. + Returns a function that can be used to apply max-norm regularization to each row of weight matrix. + The implementation follows `TensorFlow contrib `__. + + Parameters + ---------- + scale : float + A scalar multiplier `Tensor`. 0.0 disables the regularizer. + + Returns + --------- + A function with signature `mn_i(weights, name=None)` that apply Lo regularization. + + Raises + --------- + ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. + + """ + # if isinstance(scale, numbers.Integral): + # raise ValueError('scale cannot be an integer: %s' % scale) + # + # if isinstance(scale, numbers.Real): + # if scale < 0.: + # raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale) + # # if scale >= 1.: + # # raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % + # # scale) + # if scale == 0.: + # logging.info('Scale of 0 disables regularizer.') + # return lambda _, name=None: None + # + # def mn_i(weights, name='maxnorm_i_regularizer'): + # """Applies max-norm regularization to weights.""" + # with tf.name_scope(name) as scope: + # my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale') + # if tf.__version__ <= '0.12': + # standard_ops_fn = standard_ops.mul + # else: + # standard_ops_fn = standard_ops.multiply + # return standard_ops_fn( + # my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 1)), name=scope + # ) + # + # return mn_i + raise NotImplementedError("Not Implemented.") + + +def huber_loss( + output, target, is_mean=True, delta=1.0, dynamichuber=False, reverse=False, axis=-1, epsilon=0.00001, name=None +): + """Huber Loss operation, see ``https://en.wikipedia.org/wiki/Huber_loss`` . + Reverse Huber Loss operation, see ''https://statweb.stanford.edu/~owen/reports/hhu.pdf''. + Dynamic Reverse Huber Loss operation, see ''https://arxiv.org/pdf/1606.00373.pdf''. + + Parameters + ---------- + output : Tensor + A distribution with shape: [batch_size, ....], (any dimensions). + target : Tensor + The target distribution, format the same with `output`. + is_mean : boolean + Whether compute the mean or sum for each example. + - If True, use ``tf.reduce_mean`` to compute the loss between one target and predict data (default). + - If False, use ``tf.reduce_sum``. 
+ delta: float + The point where the huber loss function changes from a quadratic to linear. + dynamichuber: boolean + Whether compute the coefficient c for each batch. + - If True, c is 20% of the maximal per-batch error. + - If False, c is delta. + reverse: boolean + Whether compute the reverse huber loss. + axis : int or list of int + The dimensions to reduce. + epsilon: + Eplison. + name : string + Name of this loss. + + """ + # if reverse: + # if dynamichuber: + # huber_c = 0.2 * tf.reduce_max(tf.abs(output - target)) + # else: + # huber_c = delta + # if is_mean: + # loss = tf.reduce_mean( + # tf.where( + # tf.less_equal(tf.abs(output - target), huber_c), tf.abs(output - target), + # tf.multiply( + # tf.pow(output - target, 2.0) + tf.pow(huber_c, 2.0), + # tf.math.divide_no_nan(.5, huber_c + epsilon) + # ) + # ), name=name + # ) + # else: + # loss = tf.reduce_mean( + # tf.reduce_sum( + # tf.where( + # tf.less_equal(tf.abs(output - target), huber_c), tf.abs(output - target), + # tf.multiply( + # tf.pow(output - target, 2.0) + tf.pow(huber_c, 2.0), + # tf.math.divide_no_nan(.5, huber_c + epsilon) + # ) + # ), axis + # ), name=name + # ) + # elif is_mean: + # loss = tf.reduce_mean( + # tf.where( + # tf.less_equal(tf.abs(output - target), delta), 0.5 * tf.pow(output - target, 2), + # delta * (tf.abs(output - target) - 0.5 * delta) + # ), name=name + # ) + # else: + # loss = tf.reduce_mean( + # tf.reduce_sum( + # tf.where( + # tf.less_equal(tf.abs(output - target), delta), 0.5 * tf.pow(output - target, 2), + # delta * (tf.abs(output - target) - 0.5 * delta) + # ), axis + # ), name=name + # ) + # return loss + raise NotImplementedError("Not Implemented.") diff --git a/tensorlayer/cost.py b/tensorlayer/cost/tensorflow_cost.py similarity index 99% rename from tensorlayer/cost.py rename to tensorlayer/cost/tensorflow_cost.py index 9ccf5eeca..b07acad19 100644 --- a/tensorlayer/cost.py +++ b/tensorlayer/cost/tensorflow_cost.py @@ -32,7 +32,7 @@ def cross_entropy(output, target, name=None): """Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions, - it implements softmax internally. See ``tf.nn.sparse_softmax_cross_entropy_with_logits``. + it implements softmax internally. See ``tf.ops.sparse_softmax_cross_entropy_with_logits``. Parameters ---------- @@ -60,7 +60,7 @@ def cross_entropy(output, target, name=None): def sigmoid_cross_entropy(output, target, name=None): - """Sigmoid cross-entropy operation, see ``tf.nn.sigmoid_cross_entropy_with_logits``. + """Sigmoid cross-entropy operation, see ``tf.ops.sigmoid_cross_entropy_with_logits``. Parameters ---------- @@ -512,7 +512,7 @@ def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details= targets = tf.reshape(target_seqs, [-1]) # to one vector weights = tf.cast(tf.reshape(input_mask, [-1]), dtype=tf.float32) # to one vector like targets losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name) * weights - # losses = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name)) # for TF1.0 and others + # losses = tf.reduce_mean(tf.ops.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name)) # for TF1.0 and others loss = tf.divide( tf.reduce_sum(losses), # loss from mask. reduce_sum before element-wise mul with mask !! 
diff --git a/tensorlayer/dataflow/__init__.py b/tensorlayer/dataflow/__init__.py new file mode 100644 index 000000000..b6fbab28e --- /dev/null +++ b/tensorlayer/dataflow/__init__.py @@ -0,0 +1,5 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- +from __future__ import absolute_import, division, print_function + +from .common import Dataset diff --git a/tensorlayer/dataflow/base.py b/tensorlayer/dataflow/base.py new file mode 100644 index 000000000..41450f8be --- /dev/null +++ b/tensorlayer/dataflow/base.py @@ -0,0 +1,18 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import numpy as np + + +def generator(X_train, y_train=None): + inputs = X_train + targets = y_train + if targets is None: + for _input in X_train: + yield _input + else: + if len(inputs) != len(targets): + raise AssertionError("The length of inputs and targets should be equal") + for _input, _target in zip(inputs, targets): + # yield _input.encode('utf-8'), _target.encode('utf-8') + yield (_input, np.array([_target])) diff --git a/tensorlayer/dataflow/common.py b/tensorlayer/dataflow/common.py new file mode 100644 index 000000000..aadd3dbd8 --- /dev/null +++ b/tensorlayer/dataflow/common.py @@ -0,0 +1,34 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +from .load_data_backend import * + + +class Dataset(object): + + def __init__(self): + pass + + @staticmethod + def from_generator(generator, output_types, output_shapes=None, args=None): + return FromGenerator(generator, output_types, output_shapes=output_shapes, args=args) + + @staticmethod + def map(ds, map_func, num_parallel_calls=None): + return Map(ds=ds, map_func=map_func, num_parallel_calls=num_parallel_calls) + + @staticmethod + def shuffle(ds, buffer_size, seed=None, reshuffle_each_iteration=None): + return Shuffle(ds=ds, buffer_size=buffer_size, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration) + + @staticmethod + def prefetch(ds, buffer_size): + return Prefetch(ds=ds, buffer_size=buffer_size) + + @staticmethod + def batch(ds, batch_size, drop_remainder=False): + return Batch(ds=ds, batch_size=batch_size, drop_remainder=drop_remainder) + + @staticmethod + def repeat(ds, count): + return Repeat(ds, count=count) diff --git a/tensorlayer/dataflow/dataflow_examples.py b/tensorlayer/dataflow/dataflow_examples.py new file mode 100644 index 000000000..2bee24684 --- /dev/null +++ b/tensorlayer/dataflow/dataflow_examples.py @@ -0,0 +1,56 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import tensorlayer as tl +from tensorlayer.dataflow import Dataset +import numpy as np + +X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) + + +def generator_train(): + inputs = X_train + targets = y_train + if len(inputs) != len(targets): + raise AssertionError("The length of inputs and targets should be equal") + for _input, _target in zip(inputs, targets): + # yield _input.encode('utf-8'), _target.encode('utf-8') + yield (_input, np.array(_target)) + + +batch_size = 128 +shuffle_buffer_size = 128 +n_epoch = 10 + +import tensorflow as tf + + +def _map_fn_train(img, target): + # 1. Randomly crop a [height, width] section of the image. + img = tf.image.random_crop(img, [24, 24, 3]) + # 2. Randomly flip the image horizontally. + img = tf.image.random_flip_left_right(img) + # 3. Randomly change brightness. + img = tf.image.random_brightness(img, max_delta=63) + # 4. Randomly change contrast. + img = tf.image.random_contrast(img, lower=0.2, upper=1.8) + # 5. Subtract off the mean and divide by the variance of the pixels. 
+ img = tf.image.per_image_standardization(img) + target = tf.reshape(target, ()) + return img, target + + +import multiprocessing +train_ds = Dataset.from_generator( + generator=generator_train, output_types=(tl.float32, tl.int32) +) # , output_shapes=((24, 24, 3), (1))) + +train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count()) + +train_ds = train_ds.repeat(n_epoch) +train_ds = train_ds.shuffle(shuffle_buffer_size) +train_ds = train_ds.prefetch(buffer_size=4096) +train_ds = train_ds.batch(batch_size) + +for X_batch, y_batch in train_ds: + print(X_batch.shape, y_batch.shape) diff --git a/tensorlayer/dataflow/image/__init__.py b/tensorlayer/dataflow/image/__init__.py new file mode 100644 index 000000000..df05229a7 --- /dev/null +++ b/tensorlayer/dataflow/image/__init__.py @@ -0,0 +1,2 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- diff --git a/tensorlayer/dataflow/load_data_backend.py b/tensorlayer/dataflow/load_data_backend.py new file mode 100644 index 000000000..7fe8d12e0 --- /dev/null +++ b/tensorlayer/dataflow/load_data_backend.py @@ -0,0 +1,9 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +from tensorlayer.backend.ops.load_backend import BACKEND + +if BACKEND == 'tensorflow': + pass +if BACKEND == 'mindspore': + pass diff --git a/tensorlayer/dataflow/mindspore_data.py b/tensorlayer/dataflow/mindspore_data.py new file mode 100644 index 000000000..27e647a8a --- /dev/null +++ b/tensorlayer/dataflow/mindspore_data.py @@ -0,0 +1,44 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import mindspore.dataset as dataset + +__all__ = ['FromGenerator', 'Map', 'Shuffle', 'Prefetch', 'Batch', 'Repeat'] + + +def FromGenerator(generator, output_types, output_shapes=None, args=None): + pass + + +def Map(ds, map_func, num_parallel_calls=None): + """ Maps map_func across the elements of this dataset. + + Parameters + ---------- + ds : DataFlow + input DataFlow + map_func : function + A function mapping a dataset element to another dataset element. + num_parallel_calls + + Returns + ------- + + """ + pass + + +def Shuffle(ds, buffer_size, seed=None, reshuffle_each_iteration=None): + pass + + +def Prefetch(ds, buffer_size): + pass + + +def Batch(ds, batch_size, drop_remainder=False): + pass + + +def Repeat(ds, count=None): + pass diff --git a/tensorlayer/dataflow/tensorflow_data.py b/tensorlayer/dataflow/tensorflow_data.py new file mode 100644 index 000000000..39d887fd1 --- /dev/null +++ b/tensorlayer/dataflow/tensorflow_data.py @@ -0,0 +1,44 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import tensorflow as tf + +__all__ = ['FromGenerator', 'Map', 'Shuffle', 'Prefetch', 'Batch', 'Repeat'] + + +def FromGenerator(generator, output_types, output_shapes=None, args=None): + return tf.data.Dataset.from_generator(generator, output_types, output_shapes=output_shapes, args=args) + + +def Map(ds, map_func, num_parallel_calls=None): + """ Maps map_func across the elements of this dataset. + + Parameters + ---------- + ds : DataFlow + input DataFlow + map_func : function + A function mapping a dataset element to another dataset element. 
+ num_parallel_calls + + Returns + ------- + + """ + return ds.map(map_func, num_parallel_calls=num_parallel_calls) + + +def Shuffle(ds, buffer_size, seed=None, reshuffle_each_iteration=None): + return ds.shuffle(buffer_size, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration) + + +def Prefetch(ds, buffer_size): + return ds.prefetch(buffer_size=buffer_size) + + +def Batch(ds, batch_size, drop_remainder=False): + return ds.batch(batch_size=batch_size, drop_remainder=drop_remainder) + + +def Repeat(ds, count=None): + return ds.repeat(count=count) diff --git a/tensorlayer/decorators/__init__.py b/tensorlayer/decorators/__init__.py index 2a289862a..ba8d5eb9f 100644 --- a/tensorlayer/decorators/__init__.py +++ b/tensorlayer/decorators/__init__.py @@ -5,7 +5,7 @@ various benchmarks and domain-specific problems. In addition, we also support transparent access to native TensorFlow parameters. For example, we provide not only layers for local response normalization, but also -layers that allow user to apply ``tf.nn.lrn`` on ``network.outputs``. +layers that allow user to apply ``tf.ops.lrn`` on ``network.outputs``. More functions can be found in `TensorFlow API `__. """ diff --git a/tensorlayer/files/__init__.py b/tensorlayer/files/__init__.py index 0de8a9737..8d985afff 100644 --- a/tensorlayer/files/__init__.py +++ b/tensorlayer/files/__init__.py @@ -5,7 +5,7 @@ various benchmarks and domain-specific problems. In addition, we also support transparent access to native TensorFlow parameters. For example, we provide not only layers for local response normalization, but also -layers that allow user to apply ``tf.nn.lrn`` on ``network.outputs``. +layers that allow user to apply ``tf.ops.lrn`` on ``network.outputs``. More functions can be found in `TensorFlow API `__. 
""" @@ -68,10 +68,10 @@ 'save_ckpt', 'save_npz', 'save_npz_dict', + 'load_and_assign_ckpt', + 'ckpt_to_npz_dict' #'save_graph', #'load_graph', #'save_graph_and_params', #'load_graph_and_params', - 'load_and_assign_ckpt', - 'ckpt_to_npz_dict' ] diff --git a/tensorlayer/files/utils.py b/tensorlayer/files/utils.py index ff3c84cb9..bc6d44d97 100644 --- a/tensorlayer/files/utils.py +++ b/tensorlayer/files/utils.py @@ -19,6 +19,7 @@ import cloudpickle import h5py import numpy as np +import progressbar import scipy.io as sio import tensorflow as tf from six.moves import cPickle @@ -28,11 +29,14 @@ from tensorflow.python.util.tf_export import keras_export from tensorflow.python import pywrap_tensorflow -import progressbar import tensorlayer as tl from tensorlayer import logging, nlp, utils, visualize -# from six.moves import zip +if tl.BACKEND == 'mindspore': + from mindspore.ops.operations import Assign + from mindspore.nn import Cell + from mindspore import Tensor + import mindspore as ms if sys.version_info[0] == 2: from urllib import urlretrieve @@ -67,7 +71,9 @@ 'save_npz', 'save_npz_dict', 'tf_variables_to_numpy', + 'ms_variables_to_numpy', 'assign_tf_variable', + 'assign_ms_variable', 'save_weights_to_hdf5', 'load_hdf5_to_weights_in_order', 'load_hdf5_to_weights', @@ -78,7 +84,7 @@ # 'save_pkl_graph', # 'load_pkl_graph', 'load_and_assign_ckpt', - 'ckpt_to_npz_dict', + 'ckpt_to_npz_dict' ] @@ -1950,7 +1956,13 @@ def save_npz(save_list=None, name='model.npz'): if save_list is None: save_list = [] - save_list_var = tf_variables_to_numpy(save_list) + if tl.BACKEND == 'tensorflow': + save_list_var = tf_variables_to_numpy(save_list) + elif tl.BACKEND == 'mindspore': + save_list_var = ms_variables_to_numpy(save_list) + else: + raise NotImplementedError("This backend is not supported") + # print(name, save_list_var) np.savez(name, params=save_list_var) save_list_var = None del save_list_var @@ -2015,8 +2027,25 @@ def assign_weights(weights, network): """ ops = [] - for idx, param in enumerate(weights): - ops.append(network.all_weights[idx].assign(param)) + if tl.BACKEND == 'tensorflow': + for idx, param in enumerate(weights): + ops.append(network.all_weights[idx].assign(param)) + elif tl.BACKEND == 'mindspore': + + class Assign_net(Cell): + + def __init__(self, y): + super(Assign_net, self).__init__() + self.y = y + + def construct(self, x): + Assign()(self.y, x) + + for idx, param in enumerate(weights): + assign_param = Tensor(param, dtype=ms.float32) + # net = Assign_net(network.all_weights[idx]) + # net(assign_param) + Assign()(network.all_weights[idx], assign_param) return ops @@ -2064,7 +2093,12 @@ def save_npz_dict(save_list=None, name='model.npz'): save_list = [] save_list_names = [tensor.name for tensor in save_list] - save_list_var = tf_variables_to_numpy(save_list) + if tl.BACKEND == 'tensorflow': + save_list_var = tf_variables_to_numpy(save_list) + elif tl.BACKEND == 'mindspore': + save_list_var = ms_variables_to_numpy(save_list) + else: + raise NotImplementedError('Not implemented') save_var_dict = {save_list_names[idx]: val for idx, val in enumerate(save_list_var)} np.savez(name, **save_var_dict) save_list_var = None @@ -2108,7 +2142,11 @@ def load_and_assign_npz_dict(name='model.npz', network=None, skip=False): "if you want to skip redundant or mismatch weights." 
% key ) else: - assign_tf_variable(network.all_weights[net_weights_name.index(key)], weights[key]) + if tl.BACKEND == 'tensorflow': + assign_tf_variable(network.all_weights[net_weights_name.index(key)], weights[key]) + elif tl.BACKEND == 'mindspore': + assign_param = Tensor(weights[key], dtype=ms.float32) + assign_ms_variable(network.all_weights[net_weights_name.index(key)], assign_param) logging.info("[*] Model restored from npz_dict %s" % name) @@ -2544,11 +2582,38 @@ def tf_variables_to_numpy(variables): return results +def ms_variables_to_numpy(variables): + """Convert MS tensor or list of tensors into a list of numpy array""" + if not isinstance(variables, list): + var_list = [variables] + else: + var_list = variables + + results = [v.data.asnumpy() for v in var_list] + return results + + def assign_tf_variable(variable, value): """Assign value to a TF variable""" variable.assign(value) +def assign_ms_variable(variable, value): + + class Assign_net(Cell): + + def __init__(self, y): + super(Assign_net, self).__init__() + self.y = y + + def construct(self, x): + Assign()(self.y, x) + + # net = Assign_net(variable) + # net(value) + Assign()(variable, value) + + def _save_weights_to_hdf5_group(f, layers): """ Save layer/model weights into hdf5 group recursively. @@ -2780,46 +2845,6 @@ def load_hdf5_to_weights(filepath, network, skip=False): logging.info("[*] Load %s SUCCESS!" % filepath) -def check_ckpt_file(model_dir): - model_dir = model_dir - model_path = None - count_extension = 0 - for root, dirs, files in os.walk(model_dir): - for file in files: - filename, extension = os.path.splitext(file) - if extension in ['.data-00000-of-00001', '.index', '.meta']: - count_extension += 1 - if count_extension == 3: - model_path = model_dir + '/' + filename - else: - raise Exception("Check the file extension for missing .data-00000-of-00001, .index, .meta") - if model_path is None: - raise Exception('The ckpt file is not found') - return model_path, filename - - -def rename_weight_or_biases(variable_name): - if variable_name is None: - return variable_name - split_var = variable_name.split('/') - - str_temp = '' - for i in range(len(split_var)): - if 'w' in split_var[i]: - split_var[i] = 'filters:0' - elif 'b' in split_var[i]: - split_var[i] = 'biases:0' - else: - pass - - if i < len(split_var) - 1: - str_temp = str_temp + split_var[i] + '/' - else: - str_temp = str_temp + split_var[i] - - return str_temp - - def load_and_assign_ckpt(model_dir, network=None, skip=True): """Load weights by name from a given file of ckpt format @@ -2838,7 +2863,16 @@ def load_and_assign_ckpt(model_dir, network=None, skip=True): ------- """ - model_path, filename = check_ckpt_file(model_dir) + model_dir = model_dir + model_path = None + for root, dirs, files in os.walk(model_dir): + for file in files: + filename, extension = os.path.splitext(file) + if extension in ['.data-00000-of-00001', '.index', '.meta']: + model_path = model_dir + '/' + filename + break + if model_path == None: + raise Exception('The ckpt file is not found') reader = pywrap_tensorflow.NewCheckpointReader(model_path) var_to_shape_map = reader.get_variable_to_shape_map() @@ -2859,7 +2893,7 @@ def load_and_assign_ckpt(model_dir, network=None, skip=True): logging.info("[*] Model restored from ckpt %s" % filename) -def ckpt_to_npz_dict(model_dir, save_name='model.npz', rename_key=False): +def ckpt_to_npz_dict(model_dir, save_name='model.npz'): """ Save ckpt weights to npz file Parameters @@ -2869,27 +2903,28 @@ def ckpt_to_npz_dict(model_dir, 
save_name='model.npz', rename_key=False): Examples: model_dir = /root/cnn_model/ save_name : str The save_name of the `.npz` file. - rename_key : bool - Modify parameter naming, used to match TL naming rule. - Examples: conv1_1/b_b --> conv1_1/biases:0 ; conv1_1/w_w --> conv1_1/filters:0 Returns ------- """ - model_path, _ = check_ckpt_file(model_dir) + model_dir = model_dir + model_path = None + for root, dirs, files in os.walk(model_dir): + for file in files: + filename, extension = os.path.splitext(file) + if extension in ['.data-00000-of-00001', '.index', '.meta']: + model_path = model_dir + '/' + filename + break + if model_path == None: + raise Exception('The ckpt file is not found') reader = pywrap_tensorflow.NewCheckpointReader(model_path) var_to_shape_map = reader.get_variable_to_shape_map() parameters_dict = {} - if rename_key is False: - for key in sorted(var_to_shape_map): - parameters_dict[key] = reader.get_tensor(key) - elif rename_key is True: - for key in sorted(var_to_shape_map): - parameters_dict[rename_weight_or_biases(key)] = reader.get_tensor(key) - + for key in sorted(var_to_shape_map): + parameters_dict[key] = reader.get_tensor(key) np.savez(save_name, **parameters_dict) parameters_dict = None del parameters_dict diff --git a/tensorlayer/initializers.py b/tensorlayer/initializers.py index aaf4f37ac..b7b972115 100644 --- a/tensorlayer/initializers.py +++ b/tensorlayer/initializers.py @@ -2,11 +2,11 @@ # -*- coding: utf-8 -*- import numpy as np -import tensorflow as tf +import tensorlayer as tl __all__ = [ 'Initializer', 'Zeros', 'Ones', 'Constant', 'RandomUniform', 'RandomNormal', 'TruncatedNormal', - 'deconv2d_bilinear_upsampling_initializer' + 'deconv2d_bilinear_upsampling_initializer', 'He_Normal' ] @@ -22,7 +22,7 @@ def __call__(self, shape, dtype=None): shape : tuple of int. The shape of the tensor. dtype : Optional dtype of the tensor. - If not provided will return tensor of `tf.float32`. + If not provided will return tensor of `tl.float32`. Returns ------- @@ -61,16 +61,16 @@ class Zeros(Initializer): """Initializer that generates tensors initialized to 0. """ - def __call__(self, shape, dtype=tf.float32): - return tf.zeros(shape, dtype=dtype) + def __call__(self, shape, dtype=tl.float32): + return tl.zeros(shape, dtype=dtype) class Ones(Initializer): """Initializer that generates tensors initialized to 1. 
""" - def __call__(self, shape, dtype=tf.float32): - return tf.ones(shape, dtype=dtype) + def __call__(self, shape, dtype=tl.float32): + return tl.ones(shape, dtype=dtype) class Constant(Initializer): @@ -86,8 +86,8 @@ class Constant(Initializer): def __init__(self, value=0): self.value = value - def __call__(self, shape, dtype=None): - return tf.constant(self.value, shape=shape, dtype=dtype) + def __call__(self, shape, dtype=tl.float32): + return tl.constant(self.value, shape=shape, dtype=dtype) def get_config(self): return {"value": self.value} @@ -112,8 +112,8 @@ def __init__(self, minval=-0.05, maxval=0.05, seed=None): self.maxval = maxval self.seed = seed - def __call__(self, shape, dtype=tf.float32): - return tf.random.uniform(shape, self.minval, self.maxval, dtype=dtype, seed=self.seed) + def __call__(self, shape, dtype=tl.float32): + return tl.random_uniform(shape, self.minval, self.maxval, dtype=dtype, seed=self.seed) def get_config(self): return {"minval": self.minval, "maxval": self.maxval, "seed": self.seed} @@ -137,8 +137,8 @@ def __init__(self, mean=0.0, stddev=0.05, seed=None): self.stddev = stddev self.seed = seed - def __call__(self, shape, dtype=tf.float32): - return tf.random.normal(shape, self.mean, self.stddev, dtype=dtype, seed=self.seed) + def __call__(self, shape, dtype=tl.float32): + return tl.random_normal(shape, self.mean, self.stddev, dtype=dtype, seed=self.seed) def get_config(self): return {"mean": self.mean, "stddev": self.stddev, "seed": self.seed} @@ -168,13 +168,33 @@ def __init__(self, mean=0.0, stddev=0.05, seed=None): self.stddev = stddev self.seed = seed - def __call__(self, shape, dtype=tf.float32): - return tf.random.truncated_normal(shape, self.mean, self.stddev, dtype=dtype, seed=self.seed) + def __call__(self, shape, dtype=tl.float32): + return tl.truncated_normal(shape, self.mean, self.stddev, dtype=dtype, seed=self.seed) def get_config(self): return {"mean": self.mean, "stddev": self.stddev, "seed": self.seed} +class He_Normal(Initializer): + """He normal initializer. + + Parameters + ---------- + seed : A Python integer. + Used to seed the random generator. + + """ + + def __init__(self, seed=None): + self.seed = seed + + def __call__(self, shape, dtype=tl.float32): + return tl.he_normal(seed=self.seed, shape=shape, dtype=dtype) + + def get_config(self): + return {"seed", self.seed} + + def deconv2d_bilinear_upsampling_initializer(shape): """Returns the initializer that can be passed to DeConv2dLayer for initializing the weights in correspondence to channel-wise bilinear up-sampling. 
@@ -220,7 +240,7 @@ def deconv2d_bilinear_upsampling_initializer(shape): weights[:, :, i, i] = bilinear_kernel # assign numpy array to constant_initalizer and pass to get_variable - return tf.constant_initializer(value=weights) + return Constant(value=weights) # Alias @@ -230,3 +250,4 @@ def deconv2d_bilinear_upsampling_initializer(shape): random_uniform = RandomUniform random_normal = RandomNormal truncated_normal = TruncatedNormal +he_normal = He_Normal diff --git a/tensorlayer/layers/__init__.py b/tensorlayer/layers/__init__.py index d67024381..1a130b5c3 100644 --- a/tensorlayer/layers/__init__.py +++ b/tensorlayer/layers/__init__.py @@ -11,14 +11,14 @@ from .extend import * from .image_resampling import * from .inputs import * -from .lambda_layers import * +# from .lambda_layers import * from .merge import * from .noise import * from .normalization import * from .padding import * from .pooling import * from .quantize import * -from .recurrent import * +# from .recurrent import * from .scale import * from .shape import * from .spatial_transformer import * diff --git a/tensorlayer/layers/activation.py b/tensorlayer/layers/activation.py index 31abaeaba..1a64b07a4 100644 --- a/tensorlayer/layers/activation.py +++ b/tensorlayer/layers/activation.py @@ -1,15 +1,10 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -import tensorflow as tf - from tensorlayer import logging -from tensorlayer.activation import leaky_relu6, leaky_twice_relu6 -from tensorlayer.decorators import deprecated_alias +import tensorlayer as tl from tensorlayer.initializers import truncated_normal -from tensorlayer.layers.core import Layer - -# from tensorlayer.layers.core import LayersConfig +from tensorlayer.layers.core import Module __all__ = [ 'PRelu', @@ -18,7 +13,7 @@ ] -class PRelu(Layer): +class PRelu(Module): """ The :class:`PRelu` class is Parametric Rectified Linear layer. 
It follows f(x) = alpha * x for x < 0, f(x) = x for x >= 0, @@ -54,17 +49,16 @@ class PRelu(Layer): """ def __init__( - self, - channel_shared=False, - in_channels=None, - a_init=truncated_normal(mean=0.0, stddev=0.05), - name=None # "prelu" + self, channel_shared=False, in_channels=None, a_init=truncated_normal(mean=0.0, stddev=0.05), name=None, + data_format='channels_last', dim=2 ): super(PRelu, self).__init__(name) self.channel_shared = channel_shared self.in_channels = in_channels self.a_init = a_init + self.data_format = data_format + self.dim = dim if self.channel_shared: self.build((None, )) @@ -86,22 +80,29 @@ def __repr__(self): def build(self, inputs_shape): if self.channel_shared: w_shape = (1, ) - else: - w_shape = (inputs_shape[-1], ) + elif self.data_format == 'channels_last': + w_shape = (self.in_channels, ) + elif self.data_format == 'channels_first': + if self.dim == 2: + w_shape = (1, self.in_channels, 1, 1) + elif self.dim == 1: + w_shape = (1, self.in_channels, 1) + elif self.dim == 3: + w_shape = (1, self.in_channels, 1, 1, 1) + else: + raise Exception("Dim should be equal to 1, 2 or 3") self.alpha_var = self._get_weights("alpha", shape=w_shape, init=self.a_init) - self.alpha_var_constrained = tf.nn.sigmoid(self.alpha_var, name="constraining_alpha_var_in_0_1") + self.relu = tl.ops.ReLU() + self.sigmoid = tl.ops.Sigmoid() - # @tf.function def forward(self, inputs): - - pos = tf.nn.relu(inputs) - self.alpha_var_constrained = tf.nn.sigmoid(self.alpha_var, name="constraining_alpha_var_in_0_1") - neg = -self.alpha_var_constrained * tf.nn.relu(-inputs) - + pos = self.relu(inputs) + alpha_var_constrained = self.sigmoid(self.alpha_var) + neg = -alpha_var_constrained * self.relu(-inputs) return pos + neg -class PRelu6(Layer): +class PRelu6(Module): """ The :class:`PRelu6` class is Parametric Rectified Linear layer integrating ReLU6 behaviour. 
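The rewritten `forward` above applies the sigmoid constraint to `alpha` on every call instead of caching a constrained copy at build time. A minimal NumPy sketch of the resulting PRelu computation (illustrative only, not part of the patch):

import numpy as np

def prelu_np(x, alpha_var):
    alpha = 1.0 / (1.0 + np.exp(-alpha_var))  # sigmoid keeps the learned slope in (0, 1)
    pos = np.maximum(x, 0.0)                  # relu(x)
    neg = -alpha * np.maximum(-x, 0.0)        # equals alpha * x for x < 0
    return pos + neg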
@@ -145,13 +146,17 @@ def __init__( channel_shared=False, in_channels=None, a_init=truncated_normal(mean=0.0, stddev=0.05), - name=None # "prelu6" + name=None, # "prelu6" + data_format='channels_last', + dim=2 ): super(PRelu6, self).__init__(name) self.channel_shared = channel_shared self.in_channels = in_channels self.a_init = a_init + self.data_format = data_format + self.dim = dim if self.channel_shared: self.build((None, )) @@ -173,21 +178,31 @@ def __repr__(self): def build(self, inputs_shape): if self.channel_shared: w_shape = (1, ) - else: - w_shape = (inputs_shape[-1], ) + elif self.data_format == 'channels_last': + w_shape = (self.in_channels, ) + elif self.data_format == 'channels_first': + if self.dim == 2: + w_shape = (1, self.in_channels, 1, 1) + elif self.dim == 1: + w_shape = (1, self.in_channels, 1) + elif self.dim == 3: + w_shape = (1, self.in_channels, 1, 1, 1) + else: + raise Exception("Dim should be equal to 1, 2 or 3") self.alpha_var = self._get_weights("alpha", shape=w_shape, init=self.a_init) - self.alpha_var_constrained = tf.nn.sigmoid(self.alpha_var, name="constraining_alpha_var_in_0_1") + self.sigmoid = tl.ops.Sigmoid() + self.relu = tl.ops.ReLU() # @tf.function def forward(self, inputs): - pos = tf.nn.relu(inputs) - pos_6 = -tf.nn.relu(inputs - 6) - neg = -self.alpha_var_constrained * tf.nn.relu(-inputs) - + alpha_var_constrained = self.sigmoid(self.alpha_var) + pos = self.relu(inputs) + pos_6 = -self.relu(inputs - 6) + neg = -alpha_var_constrained * self.relu(-inputs) return pos + pos_6 + neg -class PTRelu6(Layer): +class PTRelu6(Module): """ The :class:`PTRelu6` class is Parametric Rectified Linear layer integrating ReLU6 behaviour. @@ -261,21 +276,30 @@ def __repr__(self): def build(self, inputs_shape): if self.channel_shared: w_shape = (1, ) - else: - w_shape = (inputs_shape[-1], ) + elif self.data_format == 'channels_last': + w_shape = (self.in_channels, ) + elif self.data_format == 'channels_first': + if self.dim == 2: + w_shape = (1, self.in_channels, 1, 1) + elif self.dim == 1: + w_shape = (1, self.in_channels, 1) + elif self.dim == 3: + w_shape = (1, self.in_channels, 1, 1, 1) + else: + raise Exception("Dim should be equal to 1, 2 or 3") # Alpha for outputs lower than zeros self.alpha_low = self._get_weights("alpha_low", shape=w_shape, init=self.a_init) - self.alpha_low_constrained = tf.nn.sigmoid(self.alpha_low, name="constraining_alpha_low_in_0_1") - + self.sigmoid = tl.ops.Sigmoid() + self.relu = tl.ops.ReLU() # Alpha for outputs higher than 6 self.alpha_high = self._get_weights("alpha_high", shape=w_shape, init=self.a_init) - self.alpha_high_constrained = tf.nn.sigmoid(self.alpha_high, name="constraining_alpha_high_in_0_1") # @tf.function def forward(self, inputs): - pos = tf.nn.relu(inputs) - pos_6 = -tf.nn.relu(inputs - 6) + self.alpha_high_constrained * tf.nn.relu(inputs - 6) - neg = -self.alpha_low_constrained * tf.nn.relu(-inputs) - + alpha_low_constrained = self.sigmoid(self.alpha_low) + alpha_high_constrained = self.sigmoid(self.alpha_high) + pos = self.relu(inputs) + pos_6 = -self.relu(inputs - 6) + alpha_high_constrained * self.relu(inputs - 6) + neg = -alpha_low_constrained * self.relu(-inputs) return pos + pos_6 + neg diff --git a/tensorlayer/layers/convolution/__init__.py b/tensorlayer/layers/convolution/__init__.py index 8cf4bd74c..d4763a0ea 100644 --- a/tensorlayer/layers/convolution/__init__.py +++ b/tensorlayer/layers/convolution/__init__.py @@ -5,24 +5,24 @@ various benchmarks and domain-specific problems. 
In addition, we also support transparent access to native TensorFlow parameters. For example, we provide not only layers for local response normalization, but also -layers that allow user to apply ``tf.nn.lrn`` on ``network.outputs``. +layers that allow user to apply ``tf.ops.lrn`` on ``network.outputs``. More functions can be found in `TensorFlow API `__. """ -from .binary_conv import * -from .deformable_conv import * +# from .binary_conv import * +# from .deformable_conv import * from .depthwise_conv import * -from .dorefa_conv import * -from .expert_conv import * -from .expert_deconv import * -from .group_conv import * -from .quan_conv import * -from .quan_conv_bn import * -from .separable_conv import * +# from .dorefa_conv import * +# from .expert_conv import * +# from .expert_deconv import * +# from .group_conv import * +# from .quan_conv import * +# from .quan_conv_bn import * +# from .separable_conv import * from .simplified_conv import * -from .simplified_deconv import * -from .super_resolution import * -from .ternary_conv import * +# from .simplified_deconv import * +# from .super_resolution import * +# from .ternary_conv import * __all__ = [ @@ -32,18 +32,18 @@ 'Conv3d', # simplified deconv - 'DeConv2d', - 'DeConv3d', + # 'DeConv2d', + # 'DeConv3d', # expert conv - 'Conv1dLayer', - 'Conv2dLayer', - 'Conv3dLayer', + # 'Conv1dLayer', + # 'Conv2dLayer', + # 'Conv3dLayer', # expert conv - 'DeConv1dLayer', - 'DeConv2dLayer', - 'DeConv3dLayer', + # 'DeConv1dLayer', + # 'DeConv2dLayer', + # 'DeConv3dLayer', # atrous # 'AtrousConv1dLayer', @@ -51,32 +51,32 @@ # 'AtrousDeConv2d', # binary - 'BinaryConv2d', + # 'BinaryConv2d', # deformable - 'DeformableConv2d', + # 'DeformableConv2d', # depthwise 'DepthwiseConv2d', # dorefa - 'DorefaConv2d', + # 'DorefaConv2d', # group - 'GroupConv2d', + # 'GroupConv2d', # separable - 'SeparableConv1d', - 'SeparableConv2d', + # 'SeparableConv1d', + # 'SeparableConv2d', # subpixel - 'SubpixelConv1d', - 'SubpixelConv2d', + # 'SubpixelConv1d', + # 'SubpixelConv2d', # ternary - 'TernaryConv2d', + # 'TernaryConv2d', #quan_conv - 'QuanConv2d', - 'QuanConv2dWithBN', + # 'QuanConv2d', + # 'QuanConv2dWithBN', ] diff --git a/tensorlayer/layers/convolution/binary_conv.py b/tensorlayer/layers/convolution/binary_conv.py deleted file mode 100644 index 92929ae92..000000000 --- a/tensorlayer/layers/convolution/binary_conv.py +++ /dev/null @@ -1,159 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer -from tensorlayer.layers.utils import quantize - -__all__ = ['BinaryConv2d'] - - -class BinaryConv2d(Layer): - """ - The :class:`BinaryConv2d` class is a 2D binary CNN layer, which weights are either -1 or 1 while inference. - - Note that, the bias vector would not be binarized. - - Parameters - ---------- - n_filter : int - The number of filters. - filter_size : tuple of int - The filter size (height, width). - strides : tuple of int - The sliding window strides of corresponding input dimensions. - It must be in the same order as the ``shape`` parameter. - act : activation function - The activation function of this layer. - padding : str - The padding algorithm type: "SAME" or "VALID". - use_gemm : boolean - If True, use gemm instead of ``tf.matmul`` for inference. - TODO: support gemm - data_format : str - "channels_last" (NHWC, default) or "channels_first" (NCHW). 
- dilation_rate : tuple of int - Specifying the dilation rate to use for dilated convolution. - W_init : initializer - The initializer for the the weight matrix. - b_init : initializer or None - The initializer for the the bias vector. If None, skip biases. - in_channels : int - The number of in channels. - name : None or str - A unique layer name. - - Examples - --------- - With TensorLayer - - >>> net = tl.layers.Input([8, 100, 100, 32], name='input') - >>> binaryconv2d = tl.layers.QuanConv2d( - ... n_filter=64, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, in_channels=32, name='binaryconv2d' - ... )(net) - >>> print(binaryconv2d) - >>> output shape : (8, 50, 50, 64) - - """ - - def __init__( - self, - n_filter=32, - filter_size=(3, 3), - strides=(1, 1), - act=None, - padding='SAME', - use_gemm=False, - data_format="channels_last", - dilation_rate=(1, 1), - W_init=tl.initializers.truncated_normal(stddev=0.02), - b_init=tl.initializers.constant(value=0.0), - in_channels=None, - name=None # 'binary_cnn2d', - ): - super().__init__(name, act=act) - self.n_filter = n_filter - self.filter_size = filter_size - self.strides = self._strides = strides - self.padding = padding - self.use_gemm = use_gemm - self.data_format = data_format - self._dilation_rate = self.dilation_rate = dilation_rate - self.W_init = W_init - self.b_init = b_init - self.in_channels = in_channels - - if self.in_channels: - self.build(None) - self._built = True - - logging.info( - "BinaryConv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % ( - self.name, n_filter, str(filter_size), str(strides), padding, - self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - if use_gemm: - raise Exception("TODO. The current version use tf.matmul for inferencing.") - - if len(self.strides) != 2: - raise ValueError("len(strides) should be 2.") - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', strides={strides}, padding={padding}' - ) - if self.dilation_rate != (1, ) * len(self.dilation_rate): - s += ', dilation={dilation_rate}' - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - if self.data_format == 'channels_last': - self.data_format = 'NHWC' - if self.in_channels is None: - self.in_channels = inputs_shape[-1] - self._strides = [1, self._strides[0], self._strides[1], 1] - self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], 1] - elif self.data_format == 'channels_first': - self.data_format = 'NCHW' - if self.in_channels is None: - self.in_channels = inputs_shape[1] - self._strides = [1, 1, self._strides[0], self._strides[1]] - self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1]] - else: - raise Exception("data_format should be either channels_last or channels_first") - - self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, self.n_filter) - - self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) - if self.b_init: - self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) - - def forward(self, inputs): - - _W = quantize(self.W) - - outputs = tf.nn.conv2d( - input=inputs, filters=_W, strides=self._strides, padding=self.padding, 
data_format=self.data_format, - dilations=self._dilation_rate, name=self.name - ) - - if self.b_init: - outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add') - if self.act: - outputs = self.act(outputs) - - return outputs diff --git a/tensorlayer/layers/convolution/deformable_conv.py b/tensorlayer/layers/convolution/deformable_conv.py deleted file mode 100644 index 3a8038c39..000000000 --- a/tensorlayer/layers/convolution/deformable_conv.py +++ /dev/null @@ -1,371 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias, private_method -from tensorlayer.layers.core import Layer - -__all__ = [ - 'DeformableConv2d', -] - - -class DeformableConv2d(Layer): - """The :class:`DeformableConv2d` class is a 2D - `Deformable Convolutional Networks `__. - - Parameters - ---------- - offset_layer : tf.Tensor - To predict the offset of convolution operations. - The shape is (batchsize, input height, input width, 2*(number of element in the convolution kernel)) - e.g. if apply a 3*3 kernel, the number of the last dimension should be 18 (2*3*3) - n_filter : int - The number of filters. - filter_size : tuple of int - The filter size (height, width). - act : activation function - The activation function of this layer. - padding : str - The padding algorithm type: "SAME" or "VALID". - W_init : initializer - The initializer for the weight matrix. - b_init : initializer or None - The initializer for the bias vector. If None, skip biases. - in_channels : int - The number of in channels. - name : str - A unique layer name. - - Examples - -------- - With TensorLayer - - >>> net = tl.layers.InputLayer([5, 10, 10, 16], name='input') - >>> offset1 = tl.layers.Conv2d( - ... n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', name='offset1' - ... )(net) - >>> deformconv1 = tl.layers.DeformableConv2d( - ... offset_layer=offset1, n_filter=32, filter_size=(3, 3), name='deformable1' - ... )(net) - >>> offset2 = tl.layers.Conv2d( - ... n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', name='offset2' - ... )(deformconv1) - >>> deformconv2 = tl.layers.DeformableConv2d( - ... offset_layer=offset2, n_filter=64, filter_size=(3, 3), name='deformable2' - ... )(deformconv1) - - References - ---------- - - The deformation operation was adapted from the implementation in `here `__ - - Notes - ----- - - The padding is fixed to 'SAME'. - - The current implementation is not optimized for memory usgae. Please use it carefully. 
- - """ - - # @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release - def __init__( - self, - offset_layer=None, - # shape=(3, 3, 1, 100), - n_filter=32, - filter_size=(3, 3), - act=None, - padding='SAME', - W_init=tl.initializers.truncated_normal(stddev=0.02), - b_init=tl.initializers.constant(value=0.0), - in_channels=None, - name=None # 'deformable_conv_2d', - ): - super().__init__(name, act=act) - - self.offset_layer = offset_layer - self.n_filter = n_filter - self.filter_size = filter_size - self.padding = padding - self.W_init = W_init - self.b_init = b_init - self.in_channels = in_channels - - self.kernel_n = filter_size[0] * filter_size[1] - if self.offset_layer.get_shape()[-1] != 2 * self.kernel_n: - raise AssertionError("offset.get_shape()[-1] is not equal to: %d" % 2 * self.kernel_n) - - logging.info( - "DeformableConv2d %s: n_filter: %d, filter_size: %s act: %s" % ( - self.name, self.n_filter, str(self.filter_size - ), self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - # try: - # pre_channel = int(prev_layer.outputs.get_shape()[-1]) - # except Exception: # if pre_channel is ?, it happens when using Spatial Transformer Net - # pre_channel = 1 - # logging.info("[warnings] unknow input channels, set to 1") - # shape = (filter_size[0], filter_size[1], pre_channel, n_filter) - - # with tf.compat.v1.variable_scope(name): - # offset = self.offset_layer # .outputs - # - # # if offset.get_shape()[-1] != 2 * shape[0] * shape[1]: - # # raise AssertionError("offset.get_shape()[-1] is not equal to: %d" % 2 * shape[0] * shape[1]) - # - # # Grid initialisation - # input_h = int(self.inputs.get_shape()[1]) - # input_w = int(self.inputs.get_shape()[2]) - # # kernel_n = shape[0] * shape[1] - # initial_offsets = tf.stack( - # tf.meshgrid(tf.range(shape[0]), tf.range(shape[1]), indexing='ij') - # ) # initial_offsets --> (kh, kw, 2) - # initial_offsets = tf.reshape(initial_offsets, (-1, 2)) # initial_offsets --> (n, 2) - # initial_offsets = tf.expand_dims(initial_offsets, 0) # initial_offsets --> (1, n, 2) - # initial_offsets = tf.expand_dims(initial_offsets, 0) # initial_offsets --> (1, 1, n, 2) - # initial_offsets = tf.tile(initial_offsets, [input_h, input_w, 1, 1]) # initial_offsets --> (h, w, n, 2) - # initial_offsets = tf.cast(initial_offsets, 'float32') - # grid = tf.meshgrid( - # tf.range(-int((shape[0] - 1) / 2.0), int(input_h - int((shape[0] - 1) / 2.0)), 1), - # tf.range(-int((shape[1] - 1) / 2.0), int(input_w - int((shape[1] - 1) / 2.0)), 1), indexing='ij' - # ) - # - # grid = tf.stack(grid, axis=-1) - # grid = tf.cast(grid, 'float32') # grid --> (h, w, 2) - # grid = tf.expand_dims(grid, 2) # grid --> (h, w, 1, 2) - # grid = tf.tile(grid, [1, 1, self.kernel_n, 1]) # grid --> (h, w, n, 2) - # grid_offset = grid + initial_offsets # grid_offset --> (h, w, n, 2) - # - # input_deform = self._tf_batch_map_offsets(self.inputs, offset, grid_offset) - # - # # W = tf.compat.v1.get_variable( - # # name='W_deformableconv2d', shape=[1, 1, shape[0] * shape[1], shape[-2], shape[-1]], initializer=W_init, - # # dtype=LayersConfig.tf_dtype, - # # ) - # - # # _tensor = tf.nn.conv3d(input_deform, W, strides=[1, 1, 1, 1, 1], padding='VALID', name=None) - # # _tensor = tf.nn.conv3d( - # # input=input_deform, - # # filters=W, - # # strides=[1, 1, 1, 1, 1], - # # padding='VALID', - # # name=None - # # ) - # - # # if b_init: - # # b = tf.compat.v1.get_variable( - # # name='b_deformableconv2d', shape=(shape[-1]), initializer=b_init, # 
dtype=LayersConfig.tf_dtype, - # # ) - # # - # # _tensor = tf.nn.bias_add(_tensor, b, name='bias_add') - # - # # self.outputs = tf.reshape( - # # tensor=self._apply_activation(_tensor), - # # shape=[tf.shape(input=self.inputs)[0], input_h, input_w, shape[-1]] - # # ) - # - # # self._add_layers(self.outputs) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', padding={padding}' - ) - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - - self.in_channels = inputs_shape[-1] - - self.input_h = int(inputs_shape[1]) - self.input_w = int(inputs_shape[2]) - initial_offsets = tf.stack( - tf.meshgrid(tf.range(self.filter_size[0]), tf.range(self.filter_size[1]), indexing='ij') - ) # initial_offsets --> (kh, kw, 2) - initial_offsets = tf.reshape(initial_offsets, (-1, 2)) # initial_offsets --> (n, 2) - initial_offsets = tf.expand_dims(initial_offsets, 0) # initial_offsets --> (1, n, 2) - initial_offsets = tf.expand_dims(initial_offsets, 0) # initial_offsets --> (1, 1, n, 2) - initial_offsets = tf.tile( - initial_offsets, [self.input_h, self.input_w, 1, 1] - ) # initial_offsets --> (h, w, n, 2) - initial_offsets = tf.cast(initial_offsets, 'float32') - grid = tf.meshgrid( - tf.range( - -int((self.filter_size[0] - 1) / 2.0), int(self.input_h - int((self.filter_size[0] - 1) / 2.0)), 1 - ), - tf.range( - -int((self.filter_size[1] - 1) / 2.0), int(self.input_w - int((self.filter_size[1] - 1) / 2.0)), 1 - ), indexing='ij' - ) - - grid = tf.stack(grid, axis=-1) - grid = tf.cast(grid, 'float32') # grid --> (h, w, 2) - grid = tf.expand_dims(grid, 2) # grid --> (h, w, 1, 2) - grid = tf.tile(grid, [1, 1, self.kernel_n, 1]) # grid --> (h, w, n, 2) - self.grid_offset = grid + initial_offsets # grid_offset --> (h, w, n, 2) - - self.filter_shape = (1, 1, self.kernel_n, self.in_channels, self.n_filter) - - self.W = self._get_weights("W_deformableconv2d", shape=self.filter_shape, init=self.W_init) - - if self.b_init: - self.b = self._get_weights("b_deformableconv2d", shape=(self.n_filter, ), init=self.b_init) - - def forward(self, inputs): - # shape = (filter_size[0], filter_size[1], pre_channel, n_filter) - offset = self.offset_layer - grid_offset = self.grid_offset - - input_deform = self._tf_batch_map_offsets(inputs, offset, grid_offset) - outputs = tf.nn.conv3d(input=input_deform, filters=self.W, strides=[1, 1, 1, 1, 1], padding='VALID', name=None) - outputs = tf.reshape(tensor=outputs, shape=[outputs.get_shape()[0], self.input_h, self.input_w, self.n_filter]) - if self.b_init: - outputs = tf.nn.bias_add(outputs, self.b, name='bias_add') - if self.act: - outputs = self.act(outputs) - return outputs - - def _to_bc_h_w(self, x, x_shape): - """(b, h, w, c) -> (b*c, h, w)""" - x = tf.transpose(a=x, perm=[0, 3, 1, 2]) - x = tf.reshape(x, (-1, x_shape[1], x_shape[2])) - return x - - def _to_b_h_w_n_c(self, x, x_shape): - """(b*c, h, w, n) -> (b, h, w, n, c)""" - x = tf.reshape(x, (-1, x_shape[4], x_shape[1], x_shape[2], x_shape[3])) - x = tf.transpose(a=x, perm=[0, 2, 3, 4, 1]) - return x - - def tf_flatten(self, a): - """Flatten tensor""" - return tf.reshape(a, [-1]) - - def _get_vals_by_coords(self, inputs, coords, idx, out_shape): - indices = tf.stack( - [idx, 
self.tf_flatten(coords[:, :, :, :, 0]), - self.tf_flatten(coords[:, :, :, :, 1])], axis=-1 - ) - vals = tf.gather_nd(inputs, indices) - vals = tf.reshape(vals, out_shape) - return vals - - def _tf_repeat(self, a, repeats): - """Tensorflow version of np.repeat for 1D""" - # https://github.com/tensorflow/tensorflow/issues/8521 - - if len(a.get_shape()) != 1: - raise AssertionError("This is not a 1D Tensor") - - a = tf.expand_dims(a, -1) - a = tf.tile(a, [1, repeats]) - a = self.tf_flatten(a) - return a - - def _tf_batch_map_coordinates(self, inputs, coords): - """Batch version of tf_map_coordinates - - Only supports 2D feature maps - - Parameters - ---------- - inputs : ``tf.Tensor`` - shape = (b*c, h, w) - coords : ``tf.Tensor`` - shape = (b*c, h, w, n, 2) - - Returns - ------- - ``tf.Tensor`` - A Tensor with the shape as (b*c, h, w, n) - - """ - inputs_shape = inputs.get_shape() - coords_shape = coords.get_shape() - batch_channel = tf.shape(input=inputs)[0] - input_h = int(inputs_shape[1]) - input_w = int(inputs_shape[2]) - kernel_n = int(coords_shape[3]) - n_coords = input_h * input_w * kernel_n - - coords_lt = tf.cast(tf.floor(coords), 'int32') - coords_rb = tf.cast(tf.math.ceil(coords), 'int32') - coords_lb = tf.stack([coords_lt[:, :, :, :, 0], coords_rb[:, :, :, :, 1]], axis=-1) - coords_rt = tf.stack([coords_rb[:, :, :, :, 0], coords_lt[:, :, :, :, 1]], axis=-1) - - idx = self._tf_repeat(tf.range(batch_channel), n_coords) - - vals_lt = self._get_vals_by_coords(inputs, coords_lt, idx, (batch_channel, input_h, input_w, kernel_n)) - vals_rb = self._get_vals_by_coords(inputs, coords_rb, idx, (batch_channel, input_h, input_w, kernel_n)) - vals_lb = self._get_vals_by_coords(inputs, coords_lb, idx, (batch_channel, input_h, input_w, kernel_n)) - vals_rt = self._get_vals_by_coords(inputs, coords_rt, idx, (batch_channel, input_h, input_w, kernel_n)) - - coords_offset_lt = coords - tf.cast(coords_lt, 'float32') - - vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[:, :, :, :, 0] - vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[:, :, :, :, 0] - mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[:, :, :, :, 1] - - return mapped_vals - - def _tf_batch_map_offsets(self, inputs, offsets, grid_offset): - """Batch map offsets into input - - Parameters - ------------ - inputs : ``tf.Tensor`` - shape = (b, h, w, c) - offsets: ``tf.Tensor`` - shape = (b, h, w, 2*n) - grid_offset: `tf.Tensor`` - Offset grids shape = (h, w, n, 2) - - Returns - ------- - ``tf.Tensor`` - A Tensor with the shape as (b, h, w, c) - - """ - inputs_shape = inputs.get_shape() - batch_size = tf.shape(input=inputs)[0] - kernel_n = int(int(offsets.get_shape()[3]) / 2) - input_h = inputs_shape[1] - input_w = inputs_shape[2] - channel = inputs_shape[3] - - # inputs (b, h, w, c) --> (b*c, h, w) - inputs = self._to_bc_h_w(inputs, inputs_shape) - - # offsets (b, h, w, 2*n) --> (b, h, w, n, 2) - offsets = tf.reshape(offsets, (batch_size, input_h, input_w, kernel_n, 2)) - # offsets (b, h, w, n, 2) --> (b*c, h, w, n, 2) - # offsets = tf.tile(offsets, [channel, 1, 1, 1, 1]) - - coords = tf.expand_dims(grid_offset, 0) # grid_offset --> (1, h, w, n, 2) - coords = tf.tile(coords, [batch_size, 1, 1, 1, 1]) + offsets # grid_offset --> (b, h, w, n, 2) - - # clip out of bound - coords = tf.stack( - [ - tf.clip_by_value(coords[:, :, :, :, 0], 0.0, tf.cast(input_h - 1, 'float32')), - tf.clip_by_value(coords[:, :, :, :, 1], 0.0, tf.cast(input_w - 1, 'float32')) - ], axis=-1 - ) - coords = tf.tile(coords, [channel, 1, 1, 1, 
1]) - - mapped_vals = self._tf_batch_map_coordinates(inputs, coords) - # (b*c, h, w, n) --> (b, h, w, n, c) - mapped_vals = self._to_b_h_w_n_c(mapped_vals, [batch_size, input_h, input_w, kernel_n, channel]) - - return mapped_vals diff --git a/tensorlayer/layers/convolution/depthwise_conv.py b/tensorlayer/layers/convolution/depthwise_conv.py index 4f963d317..1cc90e1bb 100644 --- a/tensorlayer/layers/convolution/depthwise_conv.py +++ b/tensorlayer/layers/convolution/depthwise_conv.py @@ -1,21 +1,17 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -import tensorflow as tf - import tensorlayer as tl from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer - -# from tensorlayer.layers.core import LayersConfig +from tensorlayer.layers.core import Module +from tensorlayer.backend import BACKEND __all__ = [ 'DepthwiseConv2d', ] -class DepthwiseConv2d(Layer): +class DepthwiseConv2d(Module): """Separable/Depthwise Convolutional 2D layer, see `tf.nn.depthwise_conv2d `__. Input: @@ -54,7 +50,7 @@ class DepthwiseConv2d(Layer): >>> net = tl.layers.Input([8, 200, 200, 32], name='input') >>> depthwiseconv2d = tl.layers.DepthwiseConv2d( - ... filter_size=(3, 3), strides=(1, 1), dilation_rate=(2, 2), act=tf.nn.relu, depth_multiplier=2, name='depthwise' + ... filter_size=(3, 3), strides=(1, 1), dilation_rate=(2, 2), act=tl.ReLU, depth_multiplier=2, name='depthwise' ... )(net) >>> print(depthwiseconv2d) >>> output shape : (8, 200, 200, 64) @@ -100,12 +96,12 @@ def __init__( logging.info( "DepthwiseConv2d %s: filter_size: %s strides: %s pad: %s act: %s" % ( self.name, str(filter_size), str(strides), padding, - self.act.__name__ if self.act is not None else 'No Activation' + self.act.__class__.__name__ if self.act is not None else 'No Activation' ) ) def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' + actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' s = ( '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' ', strides={strides}, padding={padding}' @@ -128,35 +124,41 @@ def build(self, inputs_shape): if self.in_channels is None: self.in_channels = inputs_shape[-1] self._strides = [1, self._strides[0], self._strides[1], 1] - self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], 1] elif self.data_format == 'channels_first': self.data_format = 'NCHW' if self.in_channels is None: self.in_channels = inputs_shape[1] self._strides = [1, 1, self._strides[0], self._strides[1]] - self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1]] else: raise Exception("data_format should be either channels_last or channels_first") self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, self.depth_multiplier) - self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) + # Set the size of kernel as (K1,K2), then the shape is (K,Cin,K1,K2), K must be 1. 
+ if BACKEND == 'mindspore': + self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, 1) - if self.b_init: - self.b = self._get_weights("biases", shape=(self.in_channels * self.depth_multiplier), init=self.b_init) + self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) - def forward(self, inputs): - outputs = tf.nn.depthwise_conv2d( - input=inputs, - filter=self.W, - strides=self._strides, - padding=self.padding, - data_format=self.data_format, - dilations=self.dilation_rate, - name=self.name, + self.depthwise_conv2d = tl.ops.DepthwiseConv2d( + strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate, + ksize=self.filter_size, channel_multiplier=self.depth_multiplier ) + + self.b_init_flag = False if self.b_init: - outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add') + self.b = self._get_weights("biases", shape=(self.in_channels * self.depth_multiplier, ), init=self.b_init) + self.bias_add = tl.ops.BiasAdd(self.data_format) + self.b_init_flag = True + + self.act_init_flag = False if self.act: + self.act_init_flag = True + + def forward(self, inputs): + outputs = self.depthwise_conv2d(input=inputs, filter=self.W) + if self.b_init_flag: + outputs = self.bias_add(outputs, self.b) + if self.act_init_flag: outputs = self.act(outputs) return outputs diff --git a/tensorlayer/layers/convolution/dorefa_conv.py b/tensorlayer/layers/convolution/dorefa_conv.py deleted file mode 100644 index bc80f5e3a..000000000 --- a/tensorlayer/layers/convolution/dorefa_conv.py +++ /dev/null @@ -1,169 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer -from tensorlayer.layers.utils import cabs, quantize_active, quantize_weight - -__all__ = ['DorefaConv2d'] - - -class DorefaConv2d(Layer): - """The :class:`DorefaConv2d` class is a 2D quantized convolutional layer, which weights are 'bitW' bits and the output of the previous layer - are 'bitA' bits while inferencing. - - Note that, the bias vector would not be binarized. - - Parameters - ---------- - bitW : int - The bits of this layer's parameter - bitA : int - The bits of the output of previous layer - n_filter : int - The number of filters. - filter_size : tuple of int - The filter size (height, width). - strides : tuple of int - The sliding window strides of corresponding input dimensions. - It must be in the same order as the ``shape`` parameter. - act : activation function - The activation function of this layer. - padding : str - The padding algorithm type: "SAME" or "VALID". - use_gemm : boolean - If True, use gemm instead of ``tf.matmul`` for inferencing. - TODO: support gemm - data_format : str - "channels_last" (NHWC, default) or "channels_first" (NCHW). - dilation_rate : tuple of int - Specifying the dilation rate to use for dilated convolution. - W_init : initializer - The initializer for the the weight matrix. - b_init : initializer or None - The initializer for the the bias vector. If None, skip biases. - in_channels : int - The number of in channels. - name : None or str - A unique layer name. - - Examples - --------- - With TensorLayer - - >>> net = tl.layers.Input([8, 12, 12, 32], name='input') - >>> dorefaconv2d = tl.layers.QuanConv2d( - ... 
n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='dorefaconv2d' - ... )(net) - >>> print(dorefaconv2d) - >>> output shape : (8, 12, 12, 32) - - """ - - def __init__( - self, - bitW=1, - bitA=3, - n_filter=32, - filter_size=(3, 3), - strides=(1, 1), - act=None, - padding='SAME', - use_gemm=False, - data_format="channels_last", - dilation_rate=(1, 1), - W_init=tl.initializers.truncated_normal(stddev=0.02), - b_init=tl.initializers.constant(value=0.0), - in_channels=None, - name=None # 'dorefa_cnn2d', - ): - super().__init__(name, act=act) - self.bitW = bitW - self.bitA = bitA - self.n_filter = n_filter - self.filter_size = filter_size - self.strides = self._strides = strides - self.padding = padding - self.use_gemm = use_gemm - self.data_format = data_format - self.dilation_rate = self._dilation_rate = dilation_rate - self.W_init = W_init - self.b_init = b_init - self.in_channels = in_channels - - if self.in_channels: - self.build(None) - self._built = True - - logging.info( - "DorefaConv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % ( - self.name, n_filter, str(filter_size), str(strides), padding, - self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - if self.use_gemm: - raise Exception("TODO. The current version use tf.matmul for inferencing.") - - if len(self.strides) != 2: - raise ValueError("len(strides) should be 2.") - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', strides={strides}, padding={padding}' - ) - if self.dilation_rate != (1, ) * len(self.dilation_rate): - s += ', dilation={dilation_rate}' - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - if self.data_format == 'channels_last': - self.data_format = 'NHWC' - if self.in_channels is None: - self.in_channels = inputs_shape[-1] - self._strides = [1, self._strides[0], self._strides[1], 1] - self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], 1] - elif self.data_format == 'channels_first': - self.data_format = 'NCHW' - if self.in_channels is None: - self.in_channels = inputs_shape[1] - self._strides = [1, 1, self._strides[0], self._strides[1]] - self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1]] - else: - raise Exception("data_format should be either channels_last or channels_first") - - self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, self.n_filter) - - self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) - if self.b_init: - self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) - - def forward(self, inputs): - - inputs = quantize_active(cabs(inputs), self.bitA) # Do not remove - - W_ = quantize_weight(self.W, self.bitW) - - outputs = tf.nn.conv2d( - input=inputs, filters=W_, strides=self._strides, padding=self.padding, data_format=self.data_format, - dilations=self._dilation_rate, name=self.name - ) - - if self.b_init: - outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add') - if self.act: - outputs = self.act(outputs) - - return outputs diff --git a/tensorlayer/layers/convolution/expert_conv.py b/tensorlayer/layers/convolution/expert_conv.py 
deleted file mode 100644 index 062a2738c..000000000 --- a/tensorlayer/layers/convolution/expert_conv.py +++ /dev/null @@ -1,372 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer - -# from tensorlayer.layers.core import LayersConfig - -__all__ = [ - 'Conv1dLayer', - 'Conv2dLayer', - 'Conv3dLayer', -] - - -class Conv1dLayer(Layer): - """ - The :class:`Conv1dLayer` class is a 1D CNN layer, see `tf.nn.conv1d `__. - - Parameters - ---------- - act : activation function - The activation function of this layer. - shape : tuple of int - The shape of the filters: (filter_length, in_channels, out_channels). - stride : int - The number of entries by which the filter is moved right at a step. - padding : str - The padding algorithm type: "SAME" or "VALID". - data_format : str - 'NWC' or 'NCW', Default is 'NWC' as it is a 1D CNN. - dilation_rate : int - Filter up-sampling/input down-sampling rate. - W_init : initializer - The initializer for the weight matrix. - b_init : initializer or None - The initializer for the bias vector. If None, skip biases. - name : None or str - A unique layer name - - Notes - ----- - - shape = [w, the number of output channel of previous layer, the number of output channels] - - the number of output channel of a layer is its last dimension. - - Examples - -------- - With TensorLayer - - >>> net = tl.layers.Input([8, 100, 1], name='input') - >>> conv1d = tl.layers.Conv1dLayer(shape=(5, 1, 32), stride=2, b_init=None, name='conv1d_1') - >>> print(conv1d) - >>> tensor = tl.layers.Conv1dLayer(shape=(5, 1, 32), stride=2, act=tf.nn.relu, name='conv1d_2')(net) - >>> print(tensor) - - """ - - def __init__( - self, - act=None, - shape=(5, 1, 5), - stride=1, - padding='SAME', - data_format='NWC', - dilation_rate=1, - W_init=tl.initializers.truncated_normal(stddev=0.02), - b_init=tl.initializers.constant(value=0.0), - name=None # 'cnn1d_layer', - ): - super().__init__(name, act=act) - self.n_filter = shape[-1] - self.filter_size = shape[0] - self.shape = shape - self.stride = stride - self.dilation_rate = dilation_rate - self.padding = padding - self.data_format = data_format - self.W_init = W_init - self.b_init = b_init - self.in_channels = shape[-2] - - self.build(None) - self._built = True - - logging.info( - "Conv1dLayer %s: shape: %s stride: %s pad: %s act: %s" % ( - self.name, str(shape), str(stride), padding, - self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', stride={stride}, padding={padding}' - ) - if self.dilation_rate != 1: - s += ', dilation={dilation_rate}' - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - self.W = self._get_weights("filters", shape=self.shape, init=self.W_init) - if self.b_init: - self.b = self._get_weights("biases", shape=(self.n_filter), init=self.b_init) - - def forward(self, inputs): - - outputs = tf.nn.conv1d( - input=inputs, - filters=self.W, - stride=self.stride, - padding=self.padding, - dilations=[ - self.dilation_rate, - ], - 
data_format=self.data_format, - name=self.name, - ) - - if self.b_init: - outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add') - if self.act: - outputs = self.act(outputs) - return outputs - - -class Conv2dLayer(Layer): - """ - The :class:`Conv2dLayer` class is a 2D CNN layer, see `tf.nn.conv2d `__. - - Parameters - ---------- - act : activation function - The activation function of this layer. - shape : tuple of int - The shape of the filters: (filter_height, filter_width, in_channels, out_channels). - strides : tuple of int - The sliding window strides of corresponding input dimensions. - It must be in the same order as the ``shape`` parameter. - padding : str - The padding algorithm type: "SAME" or "VALID". - data_format : str - "NHWC" or "NCHW", default is "NHWC". - dilation_rate : tuple of int - Filter up-sampling/input down-sampling rate. - W_init : initializer - The initializer for the weight matrix. - b_init : initializer or None - The initializer for the bias vector. If None, skip biases. - name : None or str - A unique layer name. - - Notes - ----- - - shape = [h, w, the number of output channel of previous layer, the number of output channels] - - the number of output channel of a layer is its last dimension. - - Examples - -------- - With TensorLayer - - >>> net = tl.layers.Input([8, 28, 28, 1], name='input') - >>> conv2d = tl.layers.Conv2dLayer(shape=(5, 5, 1, 32), strides=(1, 1, 1, 1), b_init=None, name='conv2d_1') - >>> print(conv2d) - >>> tensor = tl.layers.Conv2dLayer(shape=(5, 5, 1, 32), strides=(1, 1, 1, 1), act=tf.nn.relu, name='conv2d_2')(net) - >>> print(tensor) - - """ - - def __init__( - self, - act=None, - shape=(5, 5, 1, 100), - strides=(1, 1, 1, 1), - padding='SAME', - data_format='NHWC', - dilation_rate=(1, 1, 1, 1), - W_init=tl.initializers.truncated_normal(stddev=0.02), - b_init=tl.initializers.constant(value=0.0), - name=None # 'cnn2d_layer', - ): - super().__init__(name, act=act) - self.n_filter = shape[-1] - self.filter_size = (shape[0], shape[1]) - self.shape = shape - self.strides = strides - self.dilation_rate = dilation_rate - self.padding = padding - self.data_format = data_format - self.W_init = W_init - self.b_init = b_init - self.in_channels = shape[-2] - - self.build(None) - self._built = True - - logging.info( - "Conv2dLayer %s: shape: %s strides: %s pad: %s act: %s" % ( - self.name, str(shape), str(strides), padding, - self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', strides={strides}, padding={padding}' - ) - if self.dilation_rate != [ - 1, - ] * len(self.dilation_rate): - s += ', dilation={dilation_rate}' - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs): - self.W = self._get_weights("filters", shape=self.shape, init=self.W_init) - if self.b_init: - self.b = self._get_weights("biases", shape=(self.n_filter), init=self.b_init) - - def forward(self, inputs): - outputs = tf.nn.conv2d( - input=inputs, - filters=self.W, - strides=self.strides, - padding=self.padding, - data_format=self.data_format, - dilations=list(self.dilation_rate), - name=self.name, - ) - - if self.b_init: - outputs = 
tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add') - if self.act: - outputs = self.act(outputs) - return outputs - - -class Conv3dLayer(Layer): - """ - The :class:`Conv3dLayer` class is a 3D CNN layer, see `tf.nn.conv3d `__. - - Parameters - ---------- - act : activation function - The activation function of this layer. - shape : tuple of int - Shape of the filters: (filter_depth, filter_height, filter_width, in_channels, out_channels). - strides : tuple of int - The sliding window strides for corresponding input dimensions. - Must be in the same order as the shape dimension. - padding : str - The padding algorithm type: "SAME" or "VALID". - data_format : str - "NDHWC" or "NCDHW", default is "NDHWC". - dilation_rate : tuple of int - Filter up-sampling/input down-sampling rate. - W_init : initializer - The initializer for the weight matrix. - b_init : initializer or None - The initializer for the bias vector. If None, skip biases. - name : None or str - A unique layer name. - - Notes - ----- - - shape = [d, h, w, the number of output channel of previous layer, the number of output channels] - - the number of output channel of a layer is its last dimension. - - Examples - -------- - With TensorLayer - - >>> net = tl.layers.Input([8, 100, 100, 100, 3], name='input') - >>> conv3d = tl.layers.Conv3dLayer(shape=(2, 2, 2, 3, 32), strides=(1, 2, 2, 2, 1), b_init=None, name='conv3d_1') - >>> print(conv3d) - >>> tensor = tl.layers.Conv3dLayer(shape=(2, 2, 2, 3, 32), strides=(1, 2, 2, 2, 1), act=tf.nn.relu, name='conv3d_2')(net) - >>> print(tensor) - - """ - - def __init__( - self, - act=None, - shape=(2, 2, 2, 3, 32), - strides=(1, 2, 2, 2, 1), - padding='SAME', - data_format='NDHWC', - dilation_rate=(1, 1, 1, 1, 1), - W_init=tl.initializers.truncated_normal(stddev=0.02), - b_init=tl.initializers.constant(value=0.0), - name=None # 'cnn3d_layer' - ): - super().__init__(name, act=act) - self.n_filter = shape[-1] - self.filter_size = (shape[0], shape[1], shape[2]) - self.shape = shape - self.strides = strides - self.padding = padding - self.data_format = data_format - self.dilation_rate = dilation_rate - self.W_init = W_init - self.b_init = b_init - self.in_channels = shape[-2] - - self.build(None) - self._built = True - - logging.info( - "Conv3dLayer %s: shape: %s strides: %s pad: %s act: %s" % ( - self.name, str(shape), str(strides), padding, - self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', strides={strides}, padding={padding}' - ) - if self.dilation_rate != [ - 1, - ] * len(self.dilation_rate): - s += ', dilation={dilation_rate}' - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs): - - self.W = self._get_weights("filters", shape=self.shape, init=self.W_init) - if self.b_init: - self.b = self._get_weights("biases", shape=(self.n_filter), init=self.b_init) - - def forward(self, inputs): - outputs = tf.nn.conv3d( - input=inputs, - filters=self.W, - strides=self.strides, - padding=self.padding, - data_format=self.data_format, #'NDHWC', - dilations=list(self.dilation_rate), #[1, 1, 1, 1, 1], - name=self.name, - ) - - if self.b_init: - outputs = tf.nn.bias_add(outputs, 
self.b, data_format=self.data_format, name='bias_add') - if self.act: - outputs = self.act(outputs) - return outputs diff --git a/tensorlayer/layers/convolution/expert_deconv.py b/tensorlayer/layers/convolution/expert_deconv.py deleted file mode 100644 index ace1f221b..000000000 --- a/tensorlayer/layers/convolution/expert_deconv.py +++ /dev/null @@ -1,397 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer - -# from tensorlayer.layers.core import LayersConfig - -__all__ = [ - 'DeConv1dLayer', - 'DeConv2dLayer', - 'DeConv3dLayer', -] - - -class DeConv1dLayer(Layer): - """A de-convolution 1D layer. - - See `tf.nn.conv1d_transpose `__. - - Parameters - ---------- - act : activation function or None - The activation function of this layer. - shape : tuple of int - Shape of the filters: (width, output_channels, in_channels). - The filter's ``in_channels`` dimension must match that of value. - outputs_shape : tuple of int - Output shape of the deconvolution. - strides : tuple of int - The sliding window strides for corresponding input dimensions. - padding : str - The padding algorithm type: "SAME" or "VALID". - data_format : str - "NWC" or "NCW", default is "NWC". - dilation_rate : int - Filter up-sampling/input down-sampling rate. - W_init : initializer - The initializer for the weight matrix. - b_init : initializer or None - The initializer for the bias vector. If None, skip biases. - name : None or str - A unique layer name. - - Notes - ----- - - shape = [w, the number of output channels of this layer, the number of output channel of the previous layer]. - - outputs_shape = [batch_size, any, the number of output channels of this layer]. - - the number of output channel of a layer is its last dimension. - - Examples - -------- - >>> input_layer = Input([8, 25, 32], name='input_layer') - >>> deconv1d = tl.layers.DeConv1dLayer( - ... shape=(5, 64, 32), outputs_shape=(8, 50, 64), strides=(1, 2, 1), name='deconv1dlayer' - ... ) - >>> print(deconv1d) - >>> tensor = tl.layers.DeConv1dLayer( - ... shape=(5, 64, 32), outputs_shape=(8, 50, 64), strides=(1, 2, 1), name='deconv1dlayer' - ...
)(input_layer) - >>> print(tensor) - >>> output shape : (8, 50, 64) - - """ - - def __init__( - self, - act=None, - shape=(3, 128, 256), - outputs_shape=(1, 256, 128), - strides=(1, 2, 1), - padding='SAME', - data_format='NWC', - dilation_rate=(1, 1, 1), - W_init=tl.initializers.truncated_normal(stddev=0.02), - b_init=tl.initializers.constant(value=0.0), - name=None # 'decnn1d_layer', - ): - super().__init__(name, act=act) - self.shape = shape - self.outputs_shape = outputs_shape - self.strides = strides - self.padding = padding - self.data_format = data_format - self.dilation_rate = dilation_rate - self.W_init = W_init - self.b_init = b_init - self.in_channels = self.shape[-1] - - self.build(None) - self._built = True - - logging.info( - "DeConv1dLayer %s: shape: %s out_shape: %s strides: %s pad: %s act: %s" % ( - self.name, str(shape), str(outputs_shape), str(strides), padding, - self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', strides={strides}, padding={padding}' - ) - if self.dilation_rate != (1, ) * len(self.dilation_rate): - s += ', dilation={dilation_rate}' - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format( - classname=self.__class__.__name__, n_filter=self.shape[-2], filter_size=self.shape[0], **self.__dict__ - ) - - def build(self, inputs): - self.W = self._get_weights("filters", shape=self.shape, init=self.W_init) - if self.b_init: - self.b = self._get_weights("biases", shape=(self.shape[-2]), init=self.b_init) - - def forward(self, inputs): - outputs = tf.nn.conv1d_transpose( - input=inputs, - filters=self.W, - output_shape=self.outputs_shape, - strides=list(self.strides), - padding=self.padding, - data_format=self.data_format, - dilations=list(self.dilation_rate), - name=self.name, - ) - if self.b_init: - outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add') - if self.act: - outputs = self.act(outputs) - return outputs - - -class DeConv2dLayer(Layer): - """A de-convolution 2D layer. - - See `tf.nn.conv2d_transpose `__. - - Parameters - ---------- - act : activation function or None - The activation function of this layer. - shape : tuple of int - Shape of the filters: (height, width, output_channels, in_channels). - The filter's ``in_channels`` dimension must match that of value. - outputs_shape : tuple of int - Output shape of the deconvolution, - strides : tuple of int - The sliding window strides for corresponding input dimensions. - padding : str - The padding algorithm type: "SAME" or "VALID". - data_format : str - "NHWC" or "NCHW", default is "NHWC". - dilation_rate : tuple of int - Filter up-sampling/input down-sampling rate. - W_init : initializer - The initializer for the weight matrix. - b_init : initializer or None - The initializer for the bias vector. If None, skip biases. - name : None or str - A unique layer name. - - Notes - ----- - - shape = [h, w, the number of output channels of this layer, the number of output channel of the previous layer]. - - outputs_shape = [batch_size, any, any, the number of output channels of this layer]. - - the number of output channel of a layer is its last dimension. 
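The shape and outputs_shape conventions in these Notes map directly onto tf.nn.conv2d_transpose, which DeConv2dLayer wraps. Below is a minimal standalone sketch of the (height, width, output_channels, in_channels) filter layout and the explicit output shape; the concrete sizes are illustrative assumptions, not taken from this patch:

```python
# Illustrative sketch only: transposed-conv shape conventions used by DeConv2dLayer.
import tensorflow as tf

batch_size = 8
x = tf.zeros([batch_size, 32, 32, 1024])          # NHWC input
W = tf.zeros([3, 3, 512, 1024])                   # (h, w, output_channels, in_channels)
y = tf.nn.conv2d_transpose(
    x, W,
    output_shape=[batch_size, 64, 64, 512],       # caller supplies the target shape
    strides=[1, 2, 2, 1], padding='SAME'          # stride 2 doubles H and W with 'SAME'
)
print(y.shape)  # (8, 64, 64, 512)
```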
- - Examples - -------- - With TensorLayer - - TODO: Add the example code of a part of the generator in DCGAN example - - U-Net - - >>> .... - >>> conv10 = tl.layers.Conv2dLayer( - ... act=tf.nn.relu, - ... shape=(3, 3, 1024, 1024), strides=(1, 1, 1, 1), padding='SAME', - ... W_init=w_init, b_init=b_init, name='conv10' - ... )(conv9) - >>> print(conv10) - (batch_size, 32, 32, 1024) - >>> deconv1 = tl.layers.DeConv2dLayer( - ... act=tf.nn.relu, - ... shape=(3, 3, 512, 1024), strides=(1, 2, 2, 1), outputs_shape=(batch_size, 64, 64, 512), - ... padding='SAME', W_init=w_init, b_init=b_init, name='deconv1_1' - ... )(conv10) - - """ - - def __init__( - self, - act=None, - shape=(3, 3, 128, 256), - outputs_shape=(1, 256, 256, 128), - strides=(1, 2, 2, 1), - padding='SAME', - data_format='NHWC', - dilation_rate=(1, 1, 1, 1), - W_init=tl.initializers.truncated_normal(stddev=0.02), - b_init=tl.initializers.constant(value=0.0), - name=None # 'decnn2d_layer', - ): - super().__init__(name, act=act) - self.shape = shape - self.outputs_shape = outputs_shape - self.strides = strides - self.padding = padding - self.data_format = data_format - self.dilation_rate = dilation_rate - self.W_init = W_init - self.b_init = b_init - self.in_channels = self.shape[-1] - - self.build(None) - self._built = True - - logging.info( - "DeConv2dLayer %s: shape: %s out_shape: %s strides: %s pad: %s act: %s" % ( - self.name, str(shape), str(outputs_shape), str(strides), padding, - self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', strides={strides}, padding={padding}' - ) - if self.dilation_rate != (1, ) * len(self.dilation_rate): - s += ', dilation={dilation_rate}' - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format( - classname=self.__class__.__name__, n_filter=self.shape[-2], filter_size=(self.shape[0], self.shape[1]), - **self.__dict__ - ) - - def build(self, inputs): - self.W = self._get_weights("filters", shape=self.shape, init=self.W_init) - if self.b_init: - self.b = self._get_weights("biases", shape=(self.shape[-2]), init=self.b_init) - - def forward(self, inputs): - outputs = tf.nn.conv2d_transpose( - input=inputs, - filters=self.W, - output_shape=self.outputs_shape, - strides=self.strides, - padding=self.padding, - data_format=self.data_format, - dilations=list(self.dilation_rate), - name=self.name, - ) - if self.b_init: - outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add') - if self.act: - outputs = self.act(outputs) - return outputs - - -class DeConv3dLayer(Layer): - """A de-convolution 3D layer. - - See `tf.nn.conv3d_transpose `__. - - Parameters - ---------- - act : activation function or None - The activation function of this layer. - shape : tuple of int - The shape of the filters: (depth, height, width, output_channels, in_channels). - The filter's in_channels dimension must match that of value. - outputs_shape : tuple of int - The output shape of the deconvolution. - strides : tuple of int - The sliding window strides for corresponding input dimensions. - padding : str - The padding algorithm type: "SAME" or "VALID". - data_format : str - "NDHWC" or "NCDHW", default is "NDHWC".
- dilation_rate : tuple of int - Filter up-sampling/input down-sampling rate. - W_init : initializer - The initializer for the weight matrix. - b_init : initializer or None - The initializer for the bias vector. If None, skip biases. - name : None or str - A unique layer name. - - Notes - ----- - - shape = [d, h, w, the number of output channels of this layer, the number of output channel of the previous layer]. - - outputs_shape = [batch_size, any, any, any, the number of output channels of this layer]. - - the number of output channel of a layer is its last dimension. - - Examples - -------- - >>> input_layer = Input([8, 10, 10, 10, 32], name='input_layer') - >>> deconv3d = tl.layers.DeConv3dLayer( - ... shape=(2, 2, 2, 128, 32), outputs_shape=(8, 20, 20, 20, 128), strides=(1, 2, 2, 2, 1), name='deconv3dlayer' - ... ) - >>> print(deconv3d) - >>> tensor = tl.layers.DeConv3dLayer( - ... shape=(2, 2, 2, 128, 32), outputs_shape=(8, 20, 20, 20, 128), strides=(1, 2, 2, 2, 1), name='deconv3dlayer' - ... )(input_layer) - >>> print(tensor) - >>> output shape : (8, 20, 20, 20, 128) - - """ - - def __init__( - self, - act=None, - shape=(2, 2, 2, 128, 256), - outputs_shape=(1, 12, 32, 32, 128), - strides=(1, 2, 2, 2, 1), - padding='SAME', - data_format='NDHWC', - dilation_rate=(1, 1, 1, 1, 1), - W_init=tl.initializers.truncated_normal(stddev=0.02), - b_init=tl.initializers.constant(value=0.0), - name=None # 'decnn3d_layer', - ): - super().__init__(name, act=act) - self.shape = shape - self.outputs_shape = outputs_shape - self.strides = strides - self.padding = padding - self.data_format = data_format - self.dilation_rate = dilation_rate - self.W_init = W_init - self.b_init = b_init - self.in_channels = self.shape[-1] - - self.build(None) - self._built = True - - logging.info( - "DeConv3dLayer %s: shape: %s out_shape: %s strides: %s pad: %s act: %s" % ( - self.name, str(shape), str(outputs_shape), str(strides), padding, - self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', strides={strides}, padding={padding}' - ) - if self.dilation_rate != (1, ) * len(self.dilation_rate): - s += ', dilation={dilation_rate}' - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format( - classname=self.__class__.__name__, n_filter=self.shape[-2], - filter_size=(self.shape[0], self.shape[1], self.shape[2]), **self.__dict__ - ) - - def build(self, inputs): - self.W = self._get_weights("filters", shape=self.shape, init=self.W_init) - if self.b_init: - self.b = self._get_weights("biases", shape=(self.shape[-2]), init=self.b_init) - - def forward(self, inputs): - outputs = tf.nn.conv3d_transpose( - input=inputs, filters=self.W, output_shape=self.outputs_shape, strides=self.strides, padding=self.padding, - data_format=self.data_format, dilations=list(self.dilation_rate), name=self.name - ) - if self.b_init: - outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add') - if self.act: - outputs = self.act(outputs) - return outputs diff --git a/tensorlayer/layers/convolution/group_conv.py b/tensorlayer/layers/convolution/group_conv.py deleted file mode 100644 index 78b7b17fa..000000000 --- a/tensorlayer/layers/convolution/group_conv.py +++ /dev/null @@ -1,157 +0,0 @@ -#!
/usr/bin/python -# -*- coding: utf-8 -*- - -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer - -# from tensorlayer.layers.core import LayersConfig - -__all__ = [ - 'GroupConv2d', -] - - -class GroupConv2d(Layer): - """The :class:`GroupConv2d` class is 2D grouped convolution, see `here `__. - - Parameters - -------------- - n_filter : int - The number of filters. - filter_size : tuple of int - The filter size. - strides : tuple of int - The stride step. - n_group : int - The number of groups. - act : activation function - The activation function of this layer. - padding : str - The padding algorithm type: "SAME" or "VALID". - data_format : str - "channels_last" (NHWC, default) or "channels_first" (NCHW). - dilation_rate : tuple of int - Specifying the dilation rate to use for dilated convolution. - W_init : initializer - The initializer for the weight matrix. - b_init : initializer or None - The initializer for the bias vector. If None, skip biases. - in_channels : int - The number of in channels. - name : None or str - A unique layer name. - - Examples - --------- - With TensorLayer - - >>> net = tl.layers.Input([8, 24, 24, 32], name='input') - >>> groupconv2d = tl.layers.GroupConv2d( - ... n_filter=64, filter_size=(3, 3), strides=(2, 2), n_group=2, name='group' - ... )(net) - >>> print(groupconv2d) - >>> output shape : (8, 12, 12, 64) - - """ - - def __init__( - self, - n_filter=32, - filter_size=(3, 3), - strides=(2, 2), - n_group=2, - act=None, - padding='SAME', - data_format='channels_last', - dilation_rate=(1, 1), - W_init=tl.initializers.truncated_normal(stddev=0.02), - b_init=tl.initializers.constant(value=0.0), - in_channels=None, - name=None # 'groupconv', - ): # Windaway - super().__init__(name, act=act) - self.n_filter = n_filter - self.filter_size = filter_size - self.strides = self._strides = strides - self.n_group = n_group - self.padding = padding - self.data_format = data_format - self.dilation_rate = self._dilation_rate = dilation_rate - self.W_init = W_init - self.b_init = b_init - self.in_channels = in_channels - - if self.in_channels: - self.build(None) - self._built = True - - logging.info( - "GroupConv2d %s: n_filter: %d size: %s strides: %s n_group: %d pad: %s act: %s" % ( - self.name, n_filter, str(filter_size), str(strides), n_group, padding, - self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', strides={strides}, padding={padding}' - ) - if self.dilation_rate != (1, ) * len(self.dilation_rate): - s += ', dilation={dilation_rate}' - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - - if self.data_format == 'channels_last': - self.data_format = 'NHWC' - if self.in_channels is None: - self.in_channels = inputs_shape[-1] - self._strides = [1, self._strides[0], self._strides[1], 1] - self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], 1] - elif self.data_format == 'channels_first': - self.data_format = 'NCHW' - if self.in_channels is None: - self.in_channels = inputs_shape[1] - self._strides = [1, 1,
self._strides[0], self._strides[1]] - self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1]] - else: - raise Exception("data_format should be either channels_last or channels_first") - - self.groupConv = lambda i, k: tf.nn.conv2d( - i, k, strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self. - _dilation_rate, name=self.name - ) - - self.filter_shape = ( - self.filter_size[0], self.filter_size[1], int(self.in_channels / self.n_group), self.n_filter - ) - - self.We = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) - if self.b_init: - self.b = self._get_weights("biases", shape=self.n_filter, init=self.b_init) - - def forward(self, inputs): - if self.n_group == 1: - outputs = self.groupConv(inputs, self.We) - else: - inputGroups = tf.split(axis=3, num_or_size_splits=self.n_group, value=inputs) - weightsGroups = tf.split(axis=3, num_or_size_splits=self.n_group, value=self.We) - convGroups = [self.groupConv(i, k) for i, k in zip(inputGroups, weightsGroups)] - outputs = tf.concat(axis=3, values=convGroups) - if self.b_init: - outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add') - if self.act: - outputs = self.act(outputs) - return outputs diff --git a/tensorlayer/layers/convolution/quan_conv.py b/tensorlayer/layers/convolution/quan_conv.py deleted file mode 100644 index 6d17376c8..000000000 --- a/tensorlayer/layers/convolution/quan_conv.py +++ /dev/null @@ -1,170 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer -from tensorlayer.layers.utils import (quantize_active_overflow, quantize_weight_overflow) - -__all__ = ['QuanConv2d'] - - -class QuanConv2d(Layer): - """The :class:`QuanConv2d` class is a quantized convolutional layer without BN, whose weights are 'bitW' bits and whose inputs (the outputs of the previous layer) - are 'bitA' bits during inference. - Note that the bias vector is not binarized. - - Parameters - ---------- - n_filter : int - The number of filters. - filter_size : tuple of int - The filter size (height, width). - strides : tuple of int - The sliding window strides of corresponding input dimensions. - It must be in the same order as the ``shape`` parameter. - act : activation function - The activation function of this layer. - padding : str - The padding algorithm type: "SAME" or "VALID". - bitW : int - The number of bits of this layer's parameters. - bitA : int - The number of bits of the previous layer's outputs. - use_gemm : boolean - If True, use gemm instead of ``tf.matmul`` for inference. - TODO: support gemm - data_format : str - "channels_last" (NHWC, default) or "channels_first" (NCHW). - dilation_rate : tuple of int - Specifying the dilation rate to use for dilated convolution. - W_init : initializer - The initializer for the weight matrix. - b_init : initializer or None - The initializer for the bias vector. If None, skip biases. - in_channels : int - The number of in channels. - name : None or str - A unique layer name. - - Examples - --------- - With TensorLayer - - >>> net = tl.layers.Input([8, 12, 12, 64], name='input') - >>> quanconv2d = tl.layers.QuanConv2d( - ... n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='quancnn2d' - ...
)(net) - >>> print(quanconv2d) - >>> output shape : (8, 12, 12, 32) - - """ - - def __init__( - self, - bitW=8, - bitA=8, - n_filter=32, - filter_size=(3, 3), - strides=(1, 1), - act=None, - padding='SAME', - use_gemm=False, - data_format="channels_last", - dilation_rate=(1, 1), - W_init=tl.initializers.truncated_normal(stddev=0.02), - b_init=tl.initializers.constant(value=0.0), - in_channels=None, - name=None # 'quan_cnn2d', - ): - super().__init__(name, act=act) - self.bitW = bitW - self.bitA = bitA - self.n_filter = n_filter - self.filter_size = filter_size - self.strides = self._strides = strides - self.padding = padding - self.use_gemm = use_gemm - self.data_format = data_format - self.dilation_rate = self._dilation_rate = dilation_rate - self.W_init = W_init - self.b_init = b_init - self.in_channels = in_channels - - if self.in_channels: - self.build(None) - self._built = True - - logging.info( - "QuanConv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % ( - self.name, n_filter, str(filter_size), str(strides), padding, - self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - if self.use_gemm: - raise Exception("TODO. The current version use tf.matmul for inferencing.") - - if len(self.strides) != 2: - raise ValueError("len(strides) should be 2.") - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', strides={strides}, padding={padding}' - ) - if self.dilation_rate != (1, ) * len(self.dilation_rate): - s += ', dilation={dilation_rate}' - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - if self.data_format == 'channels_last': - self.data_format = 'NHWC' - if self.in_channels is None: - self.in_channels = inputs_shape[-1] - self._strides = [1, self._strides[0], self._strides[1], 1] - self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], 1] - elif self.data_format == 'channels_first': - self.data_format = 'NCHW' - if self.in_channels is None: - self.in_channels = inputs_shape[1] - self._strides = [1, 1, self._strides[0], self._strides[1]] - self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1]] - else: - raise Exception("data_format should be either channels_last or channels_first") - - self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, self.n_filter) - - self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) - if self.b_init: - self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) - - def forward(self, inputs): - - inputs = quantize_active_overflow(inputs, self.bitA) # Do not remove - - W_ = quantize_weight_overflow(self.W, self.bitW) - - outputs = tf.nn.conv2d( - input=inputs, filters=W_, strides=self.strides, padding=self.padding, data_format=self.data_format, - dilations=self._dilation_rate, name=self.name - ) - - if self.b_init: - outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add') - if self.act: - outputs = self.act(outputs) - - return outputs diff --git a/tensorlayer/layers/convolution/quan_conv_bn.py b/tensorlayer/layers/convolution/quan_conv_bn.py deleted file mode 100644 index df20a6835..000000000 --- 
a/tensorlayer/layers/convolution/quan_conv_bn.py +++ /dev/null @@ -1,234 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import numpy as np -import tensorflow as tf -from tensorflow.python.training import moving_averages - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.layers.core import Layer -from tensorlayer.layers.utils import (quantize_active_overflow, quantize_weight_overflow) - -# from tensorlayer.layers.core import LayersConfig - -__all__ = ['QuanConv2dWithBN'] - - -class QuanConv2dWithBN(Layer): - """The :class:`QuanConv2dWithBN` class is a quantized convolutional layer with BN, whose weights are 'bitW' bits and whose inputs (the outputs of the previous layer) - are 'bitA' bits during inference. - - Note that the bias vector is left unchanged (not quantized). - - Parameters - ---------- - n_filter : int - The number of filters. - filter_size : tuple of int - The filter size (height, width). - strides : tuple of int - The sliding window strides of corresponding input dimensions. - It must be in the same order as the ``shape`` parameter. - padding : str - The padding algorithm type: "SAME" or "VALID". - act : activation function - The activation function of this layer. - decay : float - A decay factor for `ExponentialMovingAverage`. - A large value is suggested for large datasets. - epsilon : float - A small value added to the variance to avoid division by zero. - is_train : boolean - Whether the layer is being used for training or inference. - beta_init : initializer or None - The initializer for initializing beta, if None, skip beta. - Usually you should not skip beta unless you know what you are doing. - gamma_init : initializer or None - The initializer for initializing gamma, if None, skip gamma. - bitW : int - The number of bits of this layer's parameters. - bitA : int - The number of bits of the previous layer's outputs. - use_gemm : boolean - If True, use gemm instead of ``tf.matmul`` for inference (TODO). - W_init : initializer - The initializer for the weight matrix. - W_init_args : dictionary - The arguments for the weight matrix initializer. - data_format : str - "NHWC" or "NCHW", default is "NHWC". - dilation_rate : tuple of int - Specifying the dilation rate to use for dilated convolution. - in_channels : int - The number of in channels. - name : str - A unique layer name.
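The `_w_fold` and `_bias_fold` helpers further down implement standard batch-norm folding: a convolution followed by batch normalization is algebraically identical to a single convolution with rescaled weights and a shifted bias. Below is a small numpy sketch of that identity (illustrative only; a dense matmul stands in for the convolution):

```python
# Sketch of the BN folding used by QuanConv2dWithBN: BN(W @ x) == w_fold @ x + b_fold.
import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(4, 3))                            # toy "conv" weights
x = rng.normal(size=3)
gamma, beta = rng.normal(size=4), rng.normal(size=4)   # BN scale and offset
mean, var = rng.normal(size=4), rng.uniform(0.5, 1.5, size=4)
eps = 1e-5

bn_out = gamma * (W @ x - mean) / np.sqrt(var + eps) + beta
w_fold = (gamma / np.sqrt(var + eps))[:, None] * W     # matches _w_fold
b_fold = beta - gamma * mean / np.sqrt(var + eps)      # matches _bias_fold
assert np.allclose(bn_out, w_fold @ x + b_fold)
```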
- - Examples - --------- - >>> import tensorlayer as tl - >>> net = tl.layers.Input([50, 256, 256, 3]) - >>> layer = tl.layers.QuanConv2dWithBN(n_filter=64, filter_size=(5,5),strides=(1,1),padding='SAME',name='qcnnbn1') - >>> print(layer) - >>> net = tl.layers.QuanConv2dWithBN(n_filter=64, filter_size=(5,5),strides=(1,1),padding='SAME',name='qcnnbn1')(net) - >>> print(net) - """ - - def __init__( - self, - n_filter=32, - filter_size=(3, 3), - strides=(1, 1), - padding='SAME', - act=None, - decay=0.9, - epsilon=1e-5, - is_train=False, - gamma_init=tl.initializers.truncated_normal(stddev=0.02), - beta_init=tl.initializers.truncated_normal(stddev=0.02), - bitW=8, - bitA=8, - use_gemm=False, - W_init=tl.initializers.truncated_normal(stddev=0.02), - W_init_args=None, - data_format="channels_last", - dilation_rate=(1, 1), - in_channels=None, - name='quan_cnn2d_bn', - ): - super(QuanConv2dWithBN, self).__init__(act=act, name=name) - self.n_filter = n_filter - self.filter_size = filter_size - self.strides = strides - self.padding = padding - self.decay = decay - self.epsilon = epsilon - self.is_train = is_train - self.gamma_init = gamma_init - self.beta_init = beta_init - self.bitW = bitW - self.bitA = bitA - self.use_gemm = use_gemm - self.W_init = W_init - self.W_init_args = W_init_args - self.data_format = data_format - self.dilation_rate = dilation_rate - self.in_channels = in_channels - logging.info( - "QuanConv2dWithBN %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s " % ( - self.name, n_filter, filter_size, str(strides), padding, - self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - if self.in_channels: - self.build(None) - self._built = True - - if use_gemm: - raise Exception("TODO. The current version use tf.matmul for inferencing.") - - if len(strides) != 2: - raise ValueError("len(strides) should be 2.") - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', strides={strides}, padding={padding}' + actstr - ) - if self.dilation_rate != (1, ) * len(self.dilation_rate): - s += ', dilation={dilation_rate}' - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - if self.data_format == 'channels_last': - self.data_format = 'NHWC' - if self.in_channels is None: - self.in_channels = inputs_shape[-1] - self._strides = [1, self.strides[0], self.strides[1], 1] - self._dilation_rate = [1, self.dilation_rate[0], self.dilation_rate[1], 1] - elif self.data_format == 'channels_first': - self.data_format = 'NCHW' - if self.in_channels is None: - self.in_channels = inputs_shape[1] - self._strides = [1, 1, self.strides[0], self.strides[1]] - self._dilation_rate = [1, 1, self.dilation_rate[0], self.dilation_rate[1]] - else: - raise Exception("data_format should be either channels_last or channels_first") - - self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, self.n_filter) - self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) - - para_bn_shape = (self.n_filter, ) - if self.gamma_init: - self.scale_para = self._get_weights( - "scale_para", shape=para_bn_shape, init=self.gamma_init, trainable=self.is_train - ) - else: - self.scale_para = None - - if self.beta_init: - self.offset_para = self._get_weights( - "offset_para", shape=para_bn_shape, 
init=self.beta_init, trainable=self.is_train - ) - else: - self.offset_para = None - - self.moving_mean = self._get_weights( - "moving_mean", shape=para_bn_shape, init=tl.initializers.constant(1.0), trainable=False - ) - self.moving_variance = self._get_weights( - "moving_variance", shape=para_bn_shape, init=tl.initializers.constant(1.0), trainable=False - ) - - def forward(self, inputs): - x = inputs - inputs = quantize_active_overflow(inputs, self.bitA) # Do not remove - outputs = tf.nn.conv2d( - input=x, filters=self.W, strides=self._strides, padding=self.padding, data_format=self.data_format, - dilations=self._dilation_rate, name=self.name - ) - - mean, variance = tf.nn.moments(outputs, axes=list(range(len(outputs.get_shape()) - 1))) - - update_moving_mean = moving_averages.assign_moving_average( - self.moving_mean, mean, self.decay, zero_debias=False - ) # if zero_debias=True, has bias - update_moving_variance = moving_averages.assign_moving_average( - self.moving_variance, variance, self.decay, zero_debias=False - ) # if zero_debias=True, has bias - - if self.is_train: - mean, var = self.mean_var_with_update(update_moving_mean, update_moving_variance, mean, variance) - else: - mean, var = self.moving_mean, self.moving_variance - - w_fold = self._w_fold(self.W, self.scale_para, var, self.epsilon) - - W_ = quantize_weight_overflow(w_fold, self.bitW) - - conv_fold = tf.nn.conv2d(inputs, W_, strides=self.strides, padding=self.padding, data_format=self.data_format) - - if self.beta_init: - bias_fold = self._bias_fold(self.offset_para, self.scale_para, mean, var, self.epsilon) - conv_fold = tf.nn.bias_add(conv_fold, bias_fold, name='bn_bias_add') - - if self.act: - conv_fold = self.act(conv_fold) - - return conv_fold - - def mean_var_with_update(self, update_moving_mean, update_moving_variance, mean, variance): - with tf.control_dependencies([update_moving_mean, update_moving_variance]): - return tf.identity(mean), tf.identity(variance) - - def _w_fold(self, w, gama, var, epsilon): - return tf.compat.v1.div(tf.multiply(gama, w), tf.sqrt(var + epsilon)) - - def _bias_fold(self, beta, gama, mean, var, epsilon): - return tf.subtract(beta, tf.compat.v1.div(tf.multiply(gama, mean), tf.sqrt(var + epsilon))) diff --git a/tensorlayer/layers/convolution/separable_conv.py b/tensorlayer/layers/convolution/separable_conv.py deleted file mode 100644 index 156a5f80d..000000000 --- a/tensorlayer/layers/convolution/separable_conv.py +++ /dev/null @@ -1,307 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import numpy as np -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer -from tensorlayer.layers.utils import get_collection_trainable - -__all__ = [ - 'SeparableConv1d', - 'SeparableConv2d', -] - - -class SeparableConv1d(Layer): - """The :class:`SeparableConv1d` class is a 1D depthwise separable convolutional layer. - - This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. - - Parameters - ------------ - n_filter : int - The dimensionality of the output space (i.e. the number of filters in the convolution). - filter_size : int - Specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. - strides : int - Specifying the stride of the convolution. Can be a single integer to specify the same value for all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1. - padding : str - One of "valid" or "same" (case-insensitive). - data_format : str - One of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch, height, width, channels) while channels_first corresponds to inputs with shape (batch, channels, height, width). - dilation_rate : int - Specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any dilation_rate value != 1 is incompatible with specifying any stride value != 1. - depth_multiplier : int - The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to num_filters_in * depth_multiplier. - depthwise_init : initializer - For the depthwise convolution kernel. - pointwise_init : initializer - For the pointwise convolution kernel. - b_init : initializer - For the bias vector. If None, ignore bias in the pointwise part only. - in_channels : int - The number of in channels. - name : None or str - A unique layer name. - - Examples - -------- - With TensorLayer - - >>> net = tl.layers.Input([8, 50, 64], name='input') - >>> separableconv1d = tl.layers.SeparableConv1d(n_filter=32, filter_size=3, strides=2, padding='SAME', act=tf.nn.relu, name='separable_1d')(net) - >>> print(separableconv1d) - >>> output shape : (8, 25, 32) - - """ - - # @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release - def __init__( - self, - n_filter=100, - filter_size=3, - strides=1, - act=None, - padding='valid', - data_format='channels_last', - dilation_rate=1, - depth_multiplier=1, - depthwise_init=None, - pointwise_init=None, - b_init=tl.initializers.constant(value=0.0), - # depthwise_regularizer=None, - # pointwise_regularizer=None, - # bias_regularizer=None, - # activity_regularizer=None, - # depthwise_constraint=None, - # pointwise_constraint=None, - # W_init=tf.truncated_normal_initializer(stddev=0.1), - # b_init=tf.constant_initializer(value=0.0), - in_channels=None, - name=None # 'seperable1d', - ): - super().__init__(name, act=act) - self.n_filter = n_filter - self.filter_size = filter_size - self.strides = strides - self.padding = padding - self.data_format = data_format - self.dilation_rate = dilation_rate - self.depth_multiplier = depth_multiplier - self.depthwise_init = depthwise_init - self.pointwise_init = pointwise_init - self.b_init = b_init - self.in_channels = in_channels - - logging.info( - "SeparableConv1d %s: n_filter: %d filter_size: %s strides: %s depth_multiplier: %d act: %s" % ( - self.name, n_filter, str(filter_size), str(strides), depth_multiplier, - self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', stride={strides}, padding={padding}' - ) - if self.dilation_rate != 1: - s += ', dilation={dilation_rate}' - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - self.layer = tf.keras.layers.SeparableConv1D( -
filters=self.n_filter, - kernel_size=self.filter_size, - strides=self.strides, - padding=self.padding, - data_format=self.data_format, - dilation_rate=self.dilation_rate, - depth_multiplier=self.depth_multiplier, - activation=self.act, - use_bias=(True if self.b_init is not None else False), - depthwise_initializer=self.depthwise_init, - pointwise_initializer=self.pointwise_init, - bias_initializer=self.b_init, - # depthwise_regularizer=None, - # pointwise_regularizer=None, - # bias_regularizer=None, - # activity_regularizer=None, - # depthwise_constraint=None, - # pointwise_constraint=None, - # bias_constraint=None, - trainable=True, - name=self.name - ) - if self.data_format == "channels_first": - self.in_channels = inputs_shape[1] - else: - self.in_channels = inputs_shape[-1] - - # _out = self.layer(np.random.uniform([1] + list(inputs_shape))) # initialize weights - _out = self.layer( - tf.convert_to_tensor(np.random.uniform(size=list(inputs_shape)), dtype=np.float32) - ) # initialize weights - outputs_shape = _out.shape - # self._add_weights(self.layer.weights) - self._trainable_weights = self.layer.weights - - def forward(self, inputs): - outputs = self.layer(inputs) - return outputs - - -class SeparableConv2d(Layer): - """The :class:`SeparableConv2d` class is a 2D depthwise separable convolutional layer. - - This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. - While :class:`DepthwiseConv2d` performs depthwise convolution only, which allows us to add batch normalization between depthwise and pointwise convolution. - - Parameters - ------------ - n_filter : int - The dimensionality of the output space (i.e. the number of filters in the convolution). - filter_size : tuple/list of 2 int - Specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. - strides : tuple/list of 2 int - Specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1. - padding : str - One of "valid" or "same" (case-insensitive). - data_format : str - One of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch, height, width, channels) while channels_first corresponds to inputs with shape (batch, channels, height, width). - dilation_rate : integer or tuple/list of 2 int - Specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any dilation_rate value != 1 is incompatible with specifying any stride value != 1. - depth_multiplier : int - The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to num_filters_in * depth_multiplier. - depthwise_init : initializer - For the depthwise convolution kernel. - pointwise_init : initializer - For the pointwise convolution kernel. - b_init : initializer - For the bias vector. If None, ignore bias in the pointwise part only. - in_channels : int - The number of in channels. - name : None or str - A unique layer name.
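To make the depthwise/pointwise split concrete, it can be reproduced directly with tf.nn.separable_conv2d. A hedged sketch with illustrative shapes (the sizes loosely mirror the example below but are otherwise assumptions):

```python
# Illustrative sketch: a depthwise (per-channel) filter followed by a 1x1 pointwise mix.
import tensorflow as tf

x = tf.zeros([8, 50, 50, 64])              # NHWC input
depthwise = tf.zeros([3, 3, 64, 1])        # (h, w, in_channels, depth_multiplier)
pointwise = tf.zeros([1, 1, 64 * 1, 32])   # mixes 64 channels down to 32
y = tf.nn.separable_conv2d(x, depthwise, pointwise, strides=[1, 2, 2, 1], padding='SAME')
print(y.shape)  # (8, 25, 25, 32)
# Parameters: 3*3*64 + 64*32 = 2624, versus 3*3*64*32 = 18432 for a full convolution.
```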
- - Examples - -------- - With TensorLayer - - >>> net = tl.layers.Input([8, 50, 50, 64], name='input') - >>> separableconv2d = tl.layers.SeparableConv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, padding='VALID', name='separableconv2d')(net) - >>> print(separableconv2d) - >>> output shape : (8, 24, 24, 32) - - """ - - # @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release - def __init__( - self, - n_filter=100, - filter_size=(3, 3), - strides=(1, 1), - act=None, - padding='valid', - data_format='channels_last', - dilation_rate=(1, 1), - depth_multiplier=1, - depthwise_init=None, - pointwise_init=None, - b_init=tl.initializers.constant(value=0.0), - # depthwise_regularizer=None, - # pointwise_regularizer=None, - # bias_regularizer=None, - # activity_regularizer=None, - # depthwise_constraint=None, - # pointwise_constraint=None, - # W_init=tf.truncated_normal_initializer(stddev=0.1), - # b_init=tf.constant_initializer(value=0.0), - in_channels=None, - name=None # 'seperable2d', - ): - super().__init__(name, act=act) - self.n_filter = n_filter - self.filter_size = filter_size - self.strides = strides - self.padding = padding - self.data_format = data_format - self.dilation_rate = dilation_rate - self.depth_multiplier = depth_multiplier - self.depthwise_init = depthwise_init - self.pointwise_init = pointwise_init - self.b_init = b_init - self.in_channels = in_channels - - logging.info( - "SeparableConv2d %s: n_filter: %d filter_size: %s strides: %s depth_multiplier: %d act: %s" % ( - self.name, n_filter, str(filter_size), str(strides), depth_multiplier, - self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', stride={strides}, padding={padding}' - ) - if self.dilation_rate != 1: - s += ', dilation={dilation_rate}' - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - self.layer = tf.keras.layers.SeparableConv2D( - filters=self.n_filter, - kernel_size=self.filter_size, - strides=self.strides, - padding=self.padding, - data_format=self.data_format, - dilation_rate=self.dilation_rate, - depth_multiplier=self.depth_multiplier, - activation=self.act, - use_bias=(True if self.b_init is not None else False), - depthwise_initializer=self.depthwise_init, - pointwise_initializer=self.pointwise_init, - bias_initializer=self.b_init, - # depthwise_regularizer=None, - # pointwise_regularizer=None, - # bias_regularizer=None, - # activity_regularizer=None, - # depthwise_constraint=None, - # pointwise_constraint=None, - # bias_constraint=None, - trainable=True, - name=self.name - ) - if self.data_format == "channels_first": - self.in_channels = inputs_shape[1] - else: - self.in_channels = inputs_shape[-1] - # _out = self.layer(np.random.uniform([1] + list(inputs_shape))) # initialize weights - _out = self.layer( - tf.convert_to_tensor(np.random.uniform(size=list(inputs_shape)), dtype=np.float32) - ) # initialize weights - outputs_shape = _out.shape - self._trainable_weights = self.layer.weights - - def forward(self, inputs): - outputs = self.layer(inputs) - return outputs diff --git a/tensorlayer/layers/convolution/simplified_conv.py
b/tensorlayer/layers/convolution/simplified_conv.py index fab3d5817..677f00f71 100644 --- a/tensorlayer/layers/convolution/simplified_conv.py +++ b/tensorlayer/layers/convolution/simplified_conv.py @@ -1,22 +1,14 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -import tensorflow as tf - +from tensorlayer.layers.core import Module import tensorlayer as tl from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer -from tensorlayer.layers.utils import get_collection_trainable -__all__ = [ - 'Conv1d', - 'Conv2d', - 'Conv3d', -] +__all__ = ['Conv1d', 'Conv2d', 'Conv3d'] -class Conv1d(Layer): +class Conv1d(Module): """Simplified version of :class:`Conv1dLayer`. Parameters @@ -51,7 +43,7 @@ class Conv1d(Layer): >>> net = tl.layers.Input([8, 100, 1], name='input') >>> conv1d = tl.layers.Conv1d(n_filter=32, filter_size=5, stride=2, b_init=None, in_channels=1, name='conv1d_1') >>> print(conv1d) - >>> tensor = tl.layers.Conv1d(n_filter=32, filter_size=5, stride=2, act=tf.nn.relu, name='conv1d_2')(net) + >>> tensor = tl.layers.Conv1d(n_filter=32, filter_size=5, stride=2, act=tl.ops.relu, name='conv1d_2')(net) >>> print(tensor) """ @@ -88,12 +80,12 @@ def __init__( logging.info( "Conv1d %s: n_filter: %d filter_size: %s stride: %d pad: %s act: %s" % ( self.name, n_filter, filter_size, stride, padding, - self.act.__name__ if self.act is not None else 'No Activation' + self.act.__class__.__name__ if self.act is not None else 'No Activation' ) ) def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' + actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' s = ( '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' ', stride={stride}, padding={padding}' @@ -125,27 +117,32 @@ def build(self, inputs_shape): # TODO : check self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) + self.b_init_flag = False if self.b_init: self.b = self._get_weights("biases", shape=(self.n_filter), init=self.b_init) + self.bias_add = tl.ops.BiasAdd(self.data_format) + self.b_init_flag = True - def forward(self, inputs): - outputs = tf.nn.conv1d( - input=inputs, - filters=self.W, - stride=self.stride, - padding=self.padding, - data_format=self.data_format, - dilations=self.dilation_rate, - name=self.name, + self.conv1d = tl.ops.Conv1D( + stride=self.stride, padding=self.padding, data_format=self.data_format, dilations=self.dilation_rate, + out_channel=self.n_filter, k_size=self.filter_size ) - if self.b_init: - outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add') + + self.act_init_flag = False if self.act: - outputs = self.act(outputs) + self.activate = self.act + self.act_init_flag = True + + def forward(self, inputs): + outputs = self.conv1d(inputs, self.W) + if self.b_init_flag: + outputs = tl.ops.bias_add(outputs, self.b, data_format=self.data_format) + if self.act_init_flag: + outputs = self.activate(outputs) return outputs -class Conv2d(Layer): +class Conv2d(Module): """Simplified version of :class:`Conv2dLayer`. 
Parameters @@ -178,10 +175,10 @@ class Conv2d(Layer): -------- With TensorLayer - >>> net = tl.layers.Input([8, 400, 400, 3], name='input') + >>> net = tl.layers.Input([8, 3, 400, 400], name='input') >>> conv2d = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), b_init=None, in_channels=3, name='conv2d_1') >>> print(conv2d) - >>> tensor = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='conv2d_2')(net) + >>> tensor = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tl.ops.relu, name='conv2d_2')(net) >>> print(tensor) """ @@ -198,9 +195,9 @@ def __init__( W_init=tl.initializers.truncated_normal(stddev=0.02), b_init=tl.initializers.constant(value=0.0), in_channels=None, - name=None # 'conv2d', + name=None, # 'conv2d', ): - super().__init__(name, act=act) + super(Conv2d, self).__init__(name, act=act) self.n_filter = n_filter self.filter_size = filter_size self._strides = self.strides = strides @@ -218,12 +215,12 @@ def __init__( logging.info( "Conv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % ( self.name, n_filter, str(filter_size), str(strides), padding, - self.act.__name__ if self.act is not None else 'No Activation' + self.act.__class__.__name__ if self.act is not None else 'No Activation' ) ) def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' + actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' s = ( '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' ', strides={strides}, padding={padding}' @@ -254,35 +251,41 @@ def build(self, inputs_shape): else: raise Exception("data_format should be either channels_last or channels_first") + #TODO channels first filter shape [out_channel, in_channel, filter_h, filter_w] self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, self.n_filter) - self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) + self.b_init_flag = False if self.b_init: self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) + self.bias_add = tl.ops.BiasAdd(self.data_format) + self.b_init_flag = True - def forward(self, inputs): - outputs = tf.nn.conv2d( - input=inputs, - filters=self.W, - strides=self._strides, - padding=self.padding, - data_format=self.data_format, #'NHWC', - dilations=self._dilation_rate, #[1, 1, 1, 1], - name=self.name, + self.conv2d = tl.ops.Conv2D( + strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate, + out_channel=self.n_filter, k_size=(self.filter_size[0], self.filter_size[1]) ) - if self.b_init: - outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add') + + self.act_init_flag = False if self.act: + self.act_init_flag = True + + def forward(self, inputs): + outputs = self.conv2d(inputs, self.W) + if self.b_init_flag: + outputs = self.bias_add(outputs, self.b) + if self.act_init_flag: outputs = self.act(outputs) return outputs -class Conv3d(Layer): +class Conv3d(Module): """Simplified version of :class:`Conv3dLayer`. Parameters ---------- n_filter : int The number of filters.
filter_size : tuple of int The filter size (height, width). @@ -312,9 +315,9 @@ class Conv3d(Layer): With TensorLayer >>> net = tl.layers.Input([8, 20, 20, 20, 3], name='input') - >>> conv3d = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), b_init=None, in_channels=3, name='conv3d_1') + >>> conv3d = tl.layers.Conv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), b_init=None, in_channels=3, name='conv3d_1') >>> print(conv3d) - >>> tensor = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), act=tf.nn.relu, name='conv3d_2')(net) + >>> tensor = tl.layers.Conv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), act=tl.ops.relu, name='conv3d_2')(net) >>> print(tensor) """ @@ -351,12 +354,12 @@ def __init__( logging.info( "Conv3d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % ( self.name, n_filter, str(filter_size), str(strides), padding, - self.act.__name__ if self.act is not None else 'No Activation' + self.act.__class__.__name__ if self.act is not None else 'No Activation' ) ) def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' + actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' s = ( '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' ', strides={strides}, padding={padding}' @@ -396,18 +399,26 @@ def build(self, inputs_shape): if self.b_init: self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) - def forward(self, inputs): - outputs = tf.nn.conv3d( - input=inputs, - filters=self.W, - strides=self._strides, - padding=self.padding, - data_format=self.data_format, #'NDHWC', - dilations=self._dilation_rate, #[1, 1, 1, 1, 1], - name=self.name, - ) + self.b_init_flag = False if self.b_init: - outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add') + self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) + self.bias_add = tl.ops.BiasAdd(self.data_format) + self.b_init_flag = True + + self.conv3d = tl.ops.Conv3D( + strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate, + out_channel=self.n_filter, k_size=(self.filter_size[0], self.filter_size[1], self.filter_size[2]) + ) + + self.act_init_flag = False if self.act: - outputs = self.act(outputs) + self.activate = self.act() + self.act_init_flag = True + + def forward(self, inputs): + outputs = self.conv3d(inputs, self.W) + if self.b_init_flag: + outputs = tl.ops.bias_add(outputs, self.b, data_format=self.data_format) + if self.act_init_flag: + outputs = self.activate(outputs) return outputs diff --git a/tensorlayer/layers/convolution/simplified_deconv.py b/tensorlayer/layers/convolution/simplified_deconv.py deleted file mode 100644 index 8e967c114..000000000 --- a/tensorlayer/layers/convolution/simplified_deconv.py +++ /dev/null @@ -1,273 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import numpy as np -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer -from tensorlayer.layers.utils import get_collection_trainable - -__all__ = [ - # 'DeConv1d' # TODO: Shall be implemented - 'DeConv2d', - 'DeConv3d', -] - - -class DeConv2d(Layer): - """Simplified version of :class:`DeConv2dLayer`, see `tf.nn.conv2d_transpose `__. - - Parameters - ---------- - n_filter : int - The number of filters. - filter_size : tuple of int - The filter size (height, width).
- strides : tuple of int - The stride step (height, width). - padding : str - The padding algorithm type: "SAME" or "VALID". - act : activation function - The activation function of this layer. - data_format : str - "channels_last" (NHWC, default) or "channels_first" (NCHW). - dilation_rate : int of tuple of int - The dilation rate to use for dilated convolution - W_init : initializer - The initializer for the weight matrix. - b_init : initializer or None - The initializer for the bias vector. If None, skip biases. - in_channels : int - The number of in channels. - name : None or str - A unique layer name. - - Examples - -------- - With TensorLayer - - >>> net = tl.layers.Input([5, 100, 100, 32], name='input') - >>> deconv2d = tl.layers.DeConv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), in_channels=32, name='DeConv2d_1') - >>> print(deconv2d) - >>> tensor = tl.layers.DeConv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), name='DeConv2d_2')(net) - >>> print(tensor) - - """ - - def __init__( - self, - n_filter=32, - filter_size=(3, 3), - strides=(2, 2), - act=None, - padding='SAME', - dilation_rate=(1, 1), - data_format='channels_last', - W_init=tl.initializers.truncated_normal(stddev=0.02), - b_init=tl.initializers.constant(value=0.0), - in_channels=None, - name=None # 'decnn2d' - ): - super().__init__(name, act=act) - self.n_filter = n_filter - self.filter_size = filter_size - self.strides = strides - self.padding = padding - self.data_format = data_format - self.dilation_rate = dilation_rate - self.W_init = W_init - self.b_init = b_init - self.in_channels = in_channels - - # Attention: To build, we need not only the in_channels! Solved. - if self.in_channels is not None: - self.build(None) - self._built = True - - logging.info( - "DeConv2d {}: n_filters: {} strides: {} padding: {} act: {} dilation: {}".format( - self.name, - str(n_filter), - str(strides), - padding, - self.act.__name__ if self.act is not None else 'No Activation', - dilation_rate, - ) - ) - - if len(strides) != 2: - raise ValueError("len(strides) should be 2, DeConv2d and DeConv2dLayer are different.") - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', strides={strides}, padding={padding}' - ) - if self.dilation_rate != (1, ) * len(self.dilation_rate): - s += ', dilation={dilation_rate}' - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - self.layer = tf.keras.layers.Conv2DTranspose( - filters=self.n_filter, - kernel_size=self.filter_size, - strides=self.strides, - padding=self.padding, - data_format=self.data_format, - dilation_rate=self.dilation_rate, - activation=self.act, - use_bias=(True if self.b_init is not None else False), - kernel_initializer=self.W_init, - bias_initializer=self.b_init, - # dtype=tf.float32, - name=self.name, - ) - if inputs_shape is not None: - self.in_channels = inputs_shape[1 if self.data_format == "channels_first" else -1] - elif self.in_channels is not None: - inputs_shape = [1, self.in_channels, 1, 1 - ] if self.data_format == "channels_first" else [1, 1, 1, self.in_channels] - else: - raise ValueError("Either inputs_shape or in_channels must be specified for build.") - _out = self.layer( - 
tf.convert_to_tensor(np.random.uniform(size=inputs_shape), dtype=np.float32) - ) #np.random.uniform([1] + list(inputs_shape))) # initialize weights - outputs_shape = _out.shape - self._trainable_weights = self.layer.weights - - def forward(self, inputs): - outputs = self.layer(inputs) - return outputs - - -class DeConv3d(Layer): - """Simplified version of :class:`DeConv3dLayer`, see `tf.nn.conv3d_transpose `__. - - Parameters - ---------- - n_filter : int - The number of filters. - filter_size : tuple of int - The filter size (depth, height, width). - strides : tuple of int - The stride step (depth, height, width). - padding : str - The padding algorithm type: "SAME" or "VALID". - act : activation function - The activation function of this layer. - data_format : str - "channels_last" (NDHWC, default) or "channels_first" (NCDHW). - W_init : initializer - The initializer for the weight matrix. - b_init : initializer or None - The initializer for the bias vector. If None, skip bias. - in_channels : int - The number of in channels. - name : None or str - A unique layer name. - - Examples - -------- - With TensorLayer - - >>> net = tl.layers.Input([5, 100, 100, 100, 32], name='input') - >>> deconv3d = tl.layers.DeConv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), in_channels=32, name='DeConv3d_1') - >>> print(deconv3d) - >>> tensor = tl.layers.DeConv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), name='DeConv3d_2')(net) - >>> print(tensor) - - """ - - def __init__( - self, - n_filter=32, - filter_size=(3, 3, 3), - strides=(2, 2, 2), - padding='SAME', - act=None, - data_format='channels_last', - W_init=tl.initializers.truncated_normal(stddev=0.02), - b_init=tl.initializers.constant(value=0.0), - in_channels=None, - name=None # 'decnn3d' - ): - super().__init__(name, act=act) - self.n_filter = n_filter - self.filter_size = filter_size - self.strides = strides - self.padding = padding - self.data_format = data_format - self.W_init = W_init - self.b_init = b_init - self.in_channels = in_channels - - # Attention: To build, we need not only the in_channels! Solved. 
- if self.in_channels is not None: - self.build(None) - self._built = True - - logging.info( - "DeConv3d %s: n_filters: %s strides: %s pad: %s act: %s" % ( - self.name, str(n_filter), str(strides), padding, - self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - if len(strides) != 3: - raise ValueError("len(strides) should be 3, DeConv3d and DeConv3dLayer are different.") - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', strides={strides}, padding={padding}' - ) - # if self.dilation_rate != (1,) * len(self.dilation_rate): - # s += ', dilation={dilation_rate}' - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - self.layer = tf.keras.layers.Conv3DTranspose( - filters=self.n_filter, - kernel_size=self.filter_size, - strides=self.strides, - padding=self.padding, - data_format=self.data_format, - activation=self.act, - use_bias=(True if self.b_init is not None else False), - kernel_initializer=self.W_init, - bias_initializer=self.b_init, - name=self.name, - ) - if inputs_shape is not None: - self.in_channels = inputs_shape[1 if self.data_format == "channels_first" else -1] - elif self.in_channels is not None: - inputs_shape = [1, self.in_channels, 1, 1, 1 - ] if self.data_format == "channels_first" else [1, 1, 1, 1, self.in_channels] - else: - raise ValueError("Either inputs_shape or in_channels must be specified for build.") - _out = self.layer( - tf.convert_to_tensor(np.random.uniform(size=inputs_shape), dtype=np.float32) - ) #self.layer(np.random.uniform([1] + list(inputs_shape))) # initialize weights - outputs_shape = _out.shape - self._trainable_weights = self.layer.weights - - def forward(self, inputs): - outputs = self.layer(inputs) - return outputs diff --git a/tensorlayer/layers/convolution/super_resolution.py b/tensorlayer/layers/convolution/super_resolution.py deleted file mode 100644 index 5bdbd24c7..000000000 --- a/tensorlayer/layers/convolution/super_resolution.py +++ /dev/null @@ -1,202 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias, private_method -from tensorlayer.layers.core import Layer - -__all__ = [ - 'SubpixelConv1d', - 'SubpixelConv2d', -] - - -class SubpixelConv1d(Layer): - """It is a 1D sub-pixel up-sampling layer. - - Calls a TensorFlow function that directly implements this functionality. - We assume input has dim (batch, width, r) - - Parameters - ------------ - scale : int - The up-scaling ratio, a wrong setting will lead to Dimension size error. - act : activation function - The activation function of this layer. - in_channels : int - The number of in channels. - name : str - A unique layer name. - - Examples - ---------- - With TensorLayer - - >>> net = tl.layers.Input([8, 25, 32], name='input') - >>> subpixelconv1d = tl.layers.SubpixelConv1d(scale=2, name='subpixelconv1d')(net) - >>> print(subpixelconv1d) - >>> output shape : (8, 50, 16) - - References - ----------- - `Audio Super Resolution Implementation `__. 
- - """ - - def __init__( - self, - scale=2, - act=None, - in_channels=None, - name=None # 'subpixel_conv1d' - ): - super().__init__(name, act=act) - self.scale = scale - self.in_channels = in_channels - self.out_channels = int(self.in_channels / self.scale) - - if self.in_channels is not None: - self.build(None) - self._built = True - - logging.info( - "SubpixelConv1d %s: scale: %d act: %s" % - (self.name, scale, self.act.__name__ if self.act is not None else 'No Activation') - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ('{classname}(in_channels={in_channels}, out_channels={out_channels}') - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - if inputs_shape is not None: - self.in_channels = inputs_shape[-1] - self.out_channels = int(self.in_channels / self.scale) - - def forward(self, inputs): - outputs = self._PS(inputs, r=self.scale) - if self.act is not None: - outputs = self.act(outputs) - return outputs - - def _PS(self, I, r): - X = tf.transpose(a=I, perm=[2, 1, 0]) # (r, w, b) - X = tf.batch_to_space(input=X, block_shape=[r], crops=[[0, 0]]) # (1, r*w, b) - X = tf.transpose(a=X, perm=[2, 1, 0]) - return X - - -class SubpixelConv2d(Layer): - """It is a 2D sub-pixel up-sampling layer, usually be used - for Super-Resolution applications, see `SRGAN `__ for example. - - Parameters - ------------ - scale : int - The up-scaling ratio, a wrong setting will lead to dimension size error. - n_out_channel : int or None - The number of output channels. - - If None, automatically set n_out_channel == the number of input channels / (scale x scale). - - The number of input channels == (scale x scale) x The number of output channels. - act : activation function - The activation function of this layer. - in_channels : int - The number of in channels. - name : str - A unique layer name. - - Examples - --------- - With TensorLayer - - >>> # examples here just want to tell you how to set the n_out_channel. 
- >>> net = tl.layers.Input([2, 16, 16, 4], name='input1') - >>> subpixelconv2d = tl.layers.SubpixelConv2d(scale=2, n_out_channel=1, name='subpixel_conv2d1')(net) - >>> print(subpixelconv2d) - >>> output shape : (2, 32, 32, 1) - - >>> net = tl.layers.Input([2, 16, 16, 4*10], name='input2') - >>> subpixelconv2d = tl.layers.SubpixelConv2d(scale=2, n_out_channel=10, name='subpixel_conv2d2')(net) - >>> print(subpixelconv2d) - >>> output shape : (2, 32, 32, 10) - - >>> net = tl.layers.Input([2, 16, 16, 25*10], name='input3') - >>> subpixelconv2d = tl.layers.SubpixelConv2d(scale=5, n_out_channel=10, name='subpixel_conv2d3')(net) - >>> print(subpixelconv2d) - >>> output shape : (2, 80, 80, 10) - - References - ------------ - - `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network `__ - - """ - - # github/Tetrachrome/subpixel https://github.com/Tetrachrome/subpixel/blob/master/subpixel.py - def __init__( - self, - scale=2, - n_out_channels=None, - act=None, - in_channels=None, - name=None # 'subpixel_conv2d' - ): - super().__init__(name, act=act) - self.scale = scale - self.n_out_channels = n_out_channels - self.in_channels = in_channels - - if self.in_channels is not None: - self.build(None) - self._built = True - logging.info( - "SubpixelConv2d %s: scale: %d act: %s" % - (self.name, scale, self.act.__name__ if self.act is not None else 'No Activation') - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ('{classname}(in_channels={in_channels}, out_channels={n_out_channels}') - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - - if inputs_shape is not None: - self.in_channels = inputs_shape[-1] - - if self.in_channels / (self.scale**2) % 1 != 0: - raise Exception( - "SubpixelConv2d: The number of input channels == (scale x scale) x The number of output channels" - ) - self.n_out_channels = int(self.in_channels / (self.scale**2)) - - def forward(self, inputs): - outputs = self._PS(X=inputs, r=self.scale, n_out_channels=self.n_out_channels) - if self.act is not None: - outputs = self.act(outputs) - return outputs - - def _PS(self, X, r, n_out_channels): - - _err_log = "SubpixelConv2d: The number of input channels == (scale x scale) x The number of output channels" - - if n_out_channels >= 1: - if int(X.get_shape()[-1]) != (r**2) * n_out_channels: - raise Exception(_err_log) - - X = tf.compat.v1.depth_to_space(input=X, block_size=r) - else: - raise RuntimeError(_err_log) - - return X diff --git a/tensorlayer/layers/convolution/ternary_conv.py b/tensorlayer/layers/convolution/ternary_conv.py deleted file mode 100644 index a75630a9f..000000000 --- a/tensorlayer/layers/convolution/ternary_conv.py +++ /dev/null @@ -1,162 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer -from tensorlayer.layers.utils import compute_alpha, ternary_operation - -__all__ = ['TernaryConv2d'] - - -class TernaryConv2d(Layer): - """ - The :class:`TernaryConv2d` class is a 2D ternary CNN layer, which weights are either -1 or 1 or 0 while inference. - - Note that, the bias vector would not be tenarized. - - Parameters - ---------- - n_filter : int - The number of filters. 
- filter_size : tuple of int - The filter size (height, width). - strides : tuple of int - The sliding window strides of corresponding input dimensions. - It must be in the same order as the ``shape`` parameter. - act : activation function - The activation function of this layer. - padding : str - The padding algorithm type: "SAME" or "VALID". - use_gemm : boolean - If True, use gemm instead of ``tf.matmul`` for inference. - TODO: support gemm - data_format : str - "channels_last" (NHWC, default) or "channels_first" (NCHW). - dilation_rate : tuple of int - Specifying the dilation rate to use for dilated convolution. - W_init : initializer - The initializer for the the weight matrix. - b_init : initializer or None - The initializer for the the bias vector. If None, skip biases. - in_channels : int - The number of in channels. - name : None or str - A unique layer name. - - Examples - --------- - With TensorLayer - - >>> net = tl.layers.Input([8, 12, 12, 32], name='input') - >>> ternaryconv2d = tl.layers.QuanConv2d( - ... n_filter=64, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='ternaryconv2d' - ... )(net) - >>> print(ternaryconv2d) - >>> output shape : (8, 12, 12, 64) - - """ - - def __init__( - self, - n_filter=32, - filter_size=(3, 3), - strides=(1, 1), - act=None, - padding='SAME', - use_gemm=False, - data_format="channels_last", - dilation_rate=(1, 1), - W_init=tl.initializers.truncated_normal(stddev=0.02), - b_init=tl.initializers.constant(value=0.0), - in_channels=None, - name=None # 'ternary_cnn2d', - ): - super().__init__(name, act=act) - self.n_filter = n_filter - self.filter_size = filter_size - self.strides = self._strides = strides - self.padding = padding - self.use_gemm = use_gemm - self.data_format = data_format - self.dilation_rate = self._dilation_rate = dilation_rate - self.W_init = W_init - self.b_init = b_init - self.in_channels = in_channels - - if self.in_channels: - self.build(None) - self._built = True - - logging.info( - "TernaryConv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % ( - self.name, n_filter, str(filter_size), str(strides), padding, - self.act.__name__ if self.act is not None else 'No Activation' - ) - ) - - if use_gemm: - raise Exception("TODO. 
The current version use tf.matmul for inferencing.") - - if len(self.strides) != 2: - raise ValueError("len(strides) should be 2.") - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ( - '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', strides={strides}, padding={padding}' - ) - if self.dilation_rate != (1, ) * len(self.dilation_rate): - s += ', dilation={dilation_rate}' - if self.b_init is None: - s += ', bias=False' - s += (', ' + actstr) - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - if self.data_format == 'channels_last': - self.data_format = 'NHWC' - if self.in_channels is None: - self.in_channels = inputs_shape[-1] - self._strides = [1, self._strides[0], self._strides[1], 1] - self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], 1] - elif self.data_format == 'channels_first': - self.data_format = 'NCHW' - if self.in_channels is None: - self.in_channels = inputs_shape[1] - self._strides = [1, 1, self._strides[0], self._strides[1]] - self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1]] - else: - raise Exception("data_format should be either channels_last or channels_first") - - self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, self.n_filter) - - self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) - if self.b_init: - self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) - - def forward(self, inputs): - - alpha = compute_alpha(self.W) - - W_ = ternary_operation(self.W) - W_ = tf.multiply(alpha, W_) - - outputs = tf.nn.conv2d( - input=inputs, filters=W_, strides=self._strides, padding=self.padding, data_format=self.data_format, - dilations=self._dilation_rate, name=self.name - ) - - if self.b_init: - outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add') - if self.act: - outputs = self.act(outputs) - - return outputs diff --git a/tensorlayer/layers/core.py b/tensorlayer/layers/core.py deleted file mode 100644 index 023d510a2..000000000 --- a/tensorlayer/layers/core.py +++ /dev/null @@ -1,730 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import inspect -from abc import abstractmethod - -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import (deprecated_alias, private_method, protected_method) -from tensorlayer.files import utils -from tensorlayer.layers.utils import (get_variable_with_initializer, list_remove_repeat) - -__all__ = ['Layer', 'ModelLayer', 'LayerList'] - -_global_layer_name_dict = {} # TODO: better implementation? 
- -_act_dict = { - "relu": tf.nn.relu, - "relu6": tf.nn.relu6, - "leaky_relu": tf.nn.leaky_relu, - "lrelu": tf.nn.leaky_relu, - "softplus": tf.nn.softplus, - "tanh": tf.nn.tanh, - "sigmoid": tf.nn.sigmoid, -} - - -def str2act(act): - if len(act) > 5 and act[0:5] == "lrelu": - try: - alpha = float(act[5:]) - return lambda x: tf.nn.leaky_relu(x, alpha=alpha) - except Exception as e: - raise Exception("{} can not be parsed as a float".format(act[5:])) - - if len(act) > 10 and act[0:10] == "leaky_relu": - try: - alpha = float(act[10:]) - return lambda x: tf.nn.leaky_relu(x, alpha=alpha) - except Exception as e: - raise Exception("{} can not be parsed as a float".format(act[10:])) - - if act not in _act_dict.keys(): - raise Exception("Unsupported act: {}".format(act)) - return _act_dict[act] - - -class Layer(object): - """The basic :class:`Layer` class represents a single layer of a neural network. - - It should be subclassed when implementing new types of layers. - - Parameters - ---------- - name : str or None - A unique layer name. If None, a unique name will be automatically assigned. - - Methods - --------- - __init__() - Initializing the Layer. - __call__() - (1) Building the Layer if necessary. (2) Forwarding the computation. - all_weights() - Return a list of Tensor which are all weights of this Layer. - trainable_weights() - Return a list of Tensor which are all trainable weights of this Layer. - nontrainable_weights() - Return a list of Tensor which are all nontrainable weights of this Layer. - build() - Abstract method. Build the Layer. All trainable weights should be defined in this function. - forward() - Abstract method. Forward computation and return computation results. - - """ - - def __init__(self, name=None, act=None, *args, **kwargs): - """ - Initializing the Layer. - - :param name: str or None - :param name: str or function or None - """ - - # Layer constants - # for key in kwargs.keys(): - # setattr(self, key, self._argument_dict_checkup(kwargs[key])) - - # Auto naming if the name is not given - global _global_layer_name_dict - if name is None: - prefix = self.__class__.__name__.lower() - - if _global_layer_name_dict.get(prefix) is not None: - _global_layer_name_dict[prefix] += 1 - name = prefix + '_' + str(_global_layer_name_dict[prefix]) - else: - _global_layer_name_dict[prefix] = 0 - name = prefix - while True: - if _global_layer_name_dict.get(name) is None: - break - _global_layer_name_dict[prefix] += 1 - name = prefix + '_' + str(_global_layer_name_dict[prefix]) - else: - if _global_layer_name_dict.get(name) is not None: - pass - # raise ValueError( - # 'Layer name \'%s\' has already been used by another layer. Please change the layer name.' 
% name - # ) - else: - _global_layer_name_dict[name] = 0 - - self.name = name - if isinstance(act, str): - self.act = str2act(act) - else: - self.act = act - - # Layer building state - self._built = False - - # Layer nodes state - self._nodes = [] - self._nodes_fixed = False - - # Layer weight state - self._all_weights = None - self._trainable_weights = [] - self._nontrainable_weights = [] - - # nested layers - self._layers = None - - # Layer training state - self.is_train = True - - # layer config and init_args - self._config = None - self.layer_args = self._get_init_args(skip=3) - - @staticmethod - def _compute_shape(tensors): - if isinstance(tensors, list): - shape_mem = [t.get_shape().as_list() for t in tensors] - else: - shape_mem = tensors.get_shape().as_list() - return shape_mem - - @property - def config(self): - # if not self._nodes_fixed: - # raise RuntimeError("Model can not be saved when nodes are not fixed.") - if self._config is not None: - return self._config - else: - _config = {} - _config.update({'class': self.__class__.__name__.split('.')[-1]}) - self.layer_args.update(self.get_args()) - self.layer_args["name"] = self.name - _config.update({"args": self.layer_args}) - if self.__class__.__name__ in tl.layers.inputs.__all__: - _config.update({'prev_layer': None}) - else: - _config.update({'prev_layer': []}) - for node in self._nodes: - in_nodes = node.in_nodes - if not isinstance(in_nodes, list): - prev_name = in_nodes.name - else: - prev_name = [in_node.name for in_node in in_nodes] - if len(prev_name) == 1: - prev_name = prev_name[0] - _config['prev_layer'].append(prev_name) - if self._nodes_fixed: - self._config = _config - return _config - - @property - def all_weights(self): - if self._all_weights is not None and len(self._all_weights) > 0: - pass - else: - self._all_weights = self.trainable_weights + self.nontrainable_weights - return self._all_weights - - @property - def trainable_weights(self): - nested = self._collect_sublayers_attr('trainable_weights') - return self._trainable_weights + nested - - @property - def nontrainable_weights(self): - nested = self._collect_sublayers_attr('nontrainable_weights') - return self._nontrainable_weights + nested - - @property - def weights(self): - raise Exception( - "no property .weights exists, do you mean .all_weights, .trainable_weights, or .nontrainable_weights ?" - ) - - def _collect_sublayers_attr(self, attr): - if attr not in ['trainable_weights', 'nontrainable_weights']: - raise ValueError( - "Only support to collect some certain attributes of nested layers," - "e.g. 'trainable_weights', 'nontrainable_weights', but got {}".format(attr) - ) - if self._layers is None: - return [] - nested = [] - for layer in self._layers: - value = getattr(layer, attr) - if value is not None: - nested.extend(value) - return nested - - def __call__(self, inputs, *args, **kwargs): - """ - (1) Build the Layer if necessary. - (2) Forward the computation and return results. 
- (3) Add LayerNode if necessary - - :param prev_layer: np.ndarray, Tensor, Layer, list of Layers - :param kwargs: - :return: Layer - """ - if self.__class__.__name__ in tl.layers.inputs.__all__: - input_tensors = tf.convert_to_tensor(inputs) - else: - input_tensors = inputs - - if not self._built: - if isinstance(self, LayerList): - self._input_tensors = input_tensors - inputs_shape = self._compute_shape(input_tensors) - self.build(inputs_shape) - self._built = True - - outputs = self.forward(input_tensors, *args, **kwargs) - - if not self._nodes_fixed: - self._add_node(input_tensors, outputs) - - return outputs - - def _add_node(self, input_tensors, output_tensors): - """Add a LayerNode for this layer given input_tensors, output_tensors. - - WARINING: This function should not be called from outside, it should only be called - in layer.__call__ when building static model. - - Parameters - ---------- - input_tensors : Tensor or a list of tensors - Input tensors to this layer. - output_tensors : Tensor or a list of tensors - Output tensors to this layer. - - """ - inputs_list = tolist(input_tensors) - outputs_list = tolist(output_tensors) - - if self.__class__.__name__ in tl.layers.inputs.__all__: - # for InputLayer, there should be no in_nodes - in_nodes = [] - in_tensor_idxes = [0] - else: - in_nodes = [tensor._info[0] for tensor in inputs_list] - in_tensor_idxes = [tensor._info[1] for tensor in inputs_list] - node_index = len(self._nodes) - - new_node = LayerNode(self, node_index, in_nodes, inputs_list, outputs_list, in_tensor_idxes) - self._nodes.append(new_node) - for idx, tensor in enumerate(outputs_list): - tensor._info = (new_node, idx) # FIXME : modify tensor outside layers? how to deal? - - def _release_memory(self): - """ - WARINING: This function should be called with great caution. - - self.inputs and self.outputs will be set as None but not deleted in order to release memory. - """ - # FIXME : not understand why saving inputs/outputs shape - for node in self._nodes: - node.in_tensors = None - node.out_tensors = None - - def _set_mode_for_layers(self, is_train): - """ Set training/evaluation mode for the Layer""" - self.is_train = is_train - - def _fix_nodes_for_layers(self): - """ fix LayerNodes to stop growing for this layer""" - self._nodes_fixed = True - - def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), trainable=True): - """ Get trainable variables. """ - weight = get_variable_with_initializer(scope_name=self.name, var_name=var_name, shape=shape, init=init) - if trainable is True: - if self._trainable_weights is None: - self._trainable_weights = list() - self._trainable_weights.append(weight) - else: - if self._nontrainable_weights is None: - self._nontrainable_weights = list() - self._nontrainable_weights.append(weight) - return weight - - @abstractmethod - def build(self, inputs_shape): - """ - An abstract method which should be overwritten in derived classes - to define all necessary trainable weights of the layer. - - self.built should be set as True after self.build() is called. - - :param inputs_shape: tuple - """ - raise Exception("The build(self, inputs_shape) method must be implemented by inherited class") - - @abstractmethod - def forward(self, inputs): - """ - An abstract method which should be overwritten in derived classes - to define forward feeding operations of the layer. 
- - :param inputs: Tensor - :return: Tensor - """ - raise Exception("The forward method must be implemented by inherited class") - - @abstractmethod - def __repr__(self): - reprstr = "Layer" - return reprstr - - def __setitem__(self, key, item): - raise TypeError("The Layer API does not allow to use the method: `__setitem__`") - - def __delitem__(self, key): - raise TypeError("The Layer API does not allow to use the method: `__delitem__`") - - def __setattr__(self, key, value): - if isinstance(value, Layer): - value._fix_nodes_for_layers() - if self._layers is None: - self._layers = [] - self._layers.append(value) - super().__setattr__(key, value) - - def __delattr__(self, name): - value = getattr(self, name, None) - if isinstance(value, Layer): - self._layers.remove(value) - super().__delattr__(name) - - @protected_method - def get_args(self): - init_args = {"layer_type": "normal"} - return init_args - - @protected_method - def _get_init_args(self, skip=3): - """Get all arguments of current layer for saving the graph.""" - stack = inspect.stack() - - if len(stack) < skip + 1: - raise ValueError("The length of the inspection stack is shorter than the requested start position.") - - args, _, _, values = inspect.getargvalues(stack[skip][0]) - - params = {} - - for arg in args: - - # some args dont need to be saved into the graph. e.g. the input placeholder - if values[arg] is not None and arg not in ['self', 'prev_layer', 'inputs']: - - val = values[arg] - - if arg == "dtype" and isinstance(val, tf.DType): - params[arg] = repr(val) - continue - - # change function (e.g. act) into dictionary of module path and function name - if inspect.isfunction(val): - if ("__module__" in dir(val)) and (len(val.__module__) > 10) and (val.__module__[0:10] - == "tensorflow"): - params[arg] = val.__name__ - else: - params[arg] = ('is_Func', utils.func2str(val)) - # ignore more args e.g. TL initializer - elif arg.endswith('init'): - continue - # for other data type, save them directly - else: - params[arg] = val - - return params - - -class LayerNode(object): - """ - The class :class:`LayerNode` class represents a conceptional node for a layer. - - LayerNode is used for building static model and it is actually a light weighted - wrapper over Layer. Specifically, it is used for building static computational graph - (see _construct_graph() in tl.models.Model). In static model, each layer relates to - one or more LayerNode, and the connection relationship between layers is built upon - LayerNode. In addition, LayerNode eases layer reuse and weights sharing. - - Parameters - ---------- - layer : tl.layers.Layer - A tl layer that wants to create a node. - node_index : int - Index of this node in layer._nodes. - in_nodes :a list of LayerNode - Father nodes to this node. - in_tensors : a list of tensors - Input tensors to this node. - out_tensors : a list of tensors - Output tensors to this node. - in_tensor_idxes : a list of int - Indexes of each input tensor in its corresponding node's out_tensors. - - Methods - --------- - __init__() - Initializing the LayerNode. - __call__() - (1) Forwarding through the layer. (2) Update its input/output tensors. 
- """ - - def __init__(self, layer, node_index, in_nodes, in_tensors, out_tensors, in_tensor_idxes): - """ - - Parameters - ---------- - layer - node_index - in_nodes - in_tensors - out_tensors - in_tensor_idxes - """ - self.layer = layer - self.node_index = node_index - self.in_nodes = in_nodes - self.out_nodes = [] - self.in_tensors = in_tensors - self.out_tensors = out_tensors - self.name = layer.name + "_node_{}".format(node_index) - - self.in_tensors_idxes = in_tensor_idxes - - self.visited = False - - def __call__(self, inputs, **kwargs): - """(1) Forwarding through the layer. (2) Update its input/output tensors.""" - outputs = self.layer.forward(inputs, **kwargs) - self.in_tensors = tolist(inputs) - self.out_tensors = tolist(outputs) - return self.out_tensors - - -class ModelLayer(Layer): - """ - The class :class:`ModelLayer` converts a :class:`Model` to a :class:`Layer` instance. - - Note that only a :class:`Model` with specified inputs and outputs can be converted to a :class:`ModelLayer`. - For example, a customized model in dynamic eager mode normally does NOT have specified inputs and outputs so the - customized model in dynamic eager mode can NOT be converted to a :class:`ModelLayer`. - - Parameters - ---------- - model: tl.models.Model - A model. - name : str or None - A unique layer name. If None, a unique name will be automatically assigned. - - Methods - --------- - __init__() - Initializing the ModelLayer. - weights() - Same as the weights of the given model. - build() - Do nothing because the given model has already been built. - forward() - Forward the computation. Simply call the forward() of the given model. - """ - - def __init__(self, model, name=None): - """ - Initializing the ModelLayer given a instance of Model. - - :param model: tl.models.Model - """ - super(ModelLayer, self).__init__(name=name) - - self.model = model - - # Layer building state - self._built = True - - # Layer weight state - self._all_weights = model.all_weights - self._trainable_weights = model.trainable_weights - self._nontrainable_weights = model.nontrainable_weights - - # Layer training state - self.is_train = True - - logging.info("ModelLayer %s from Model: %s" % (self.name, self.model.name)) - - def __repr__(self): - tmpstr = 'ModelLayer' + '(\n' - - modstr = self.model.__repr__() - modstr = _addindent(modstr, 2) - - tmpstr += modstr + ')' - return tmpstr - - def build(self, inputs_shape): - pass - - def forward(self, inputs): - return self.model.forward(inputs) - - def _set_mode_for_layers(self, is_train): - """ Set training/evaluation mode for the ModelLayer.""" - self.is_train = is_train - return self.model._set_mode_for_layers(is_train) - - def _fix_nodes_for_layers(self): - """ fix LayerNodes to stop growing for this ModelLayer.""" - self._nodes_fixed = True - self.model._fix_nodes_for_layers() - - def _release_memory(self): - """ - WARINING: This function should be called with great caution. - - self.inputs and self.outputs will be set as None but not deleted in order to release memory. - """ - - super(ModelLayer, self)._release_memory() - self.model.release_memory() - - def get_args(self): - init_args = {} - init_args.update({"layer_type": "modellayer"}) - # init_args["model"] = utils.net2static_graph(self.layer_args["model"]) - init_args["model"] = self.layer_args["model"].config - return init_args - - -class LayerList(Layer): - """ - The class :class:`LayerList` is a linear stack of layers. - - The :class:`LayerList` can be created by passing a list of layer instances. 
- The given layer instances will be automatically connected one by one. - - Parameters - ---------- - layers: list of Layer - A list of layers. - name : str or None - A unique layer name. If None, a unique name will be automatically assigned. - - Methods - --------- - __init__() - Initializing the LayerList. - weights() - A collection of weights of all the layer instances. - build() - Build the LayerList. The layer instances will be connected automatically one by one. - forward() - Forward the computation. The computation will go through all layer instances. - """ - - def __init__(self, layers, name=None): - """ - Initializing the LayerList given a list of Layer. - - :param layers: list of Layer - :param name: str or None - """ - - super(LayerList, self).__init__(name=name) - self.layers = layers - - is_built = True - for layer in self.layers: - self._trainable_weights.extend(layer.trainable_weights) - self._nontrainable_weights.extend(layer.nontrainable_weights) - if layer._built is False: - is_built = False - if layer._built and layer.all_weights is not None: - # some layers in the list passed in have already been built - # e.g. using input shape to construct layers in dynamic eager - if self._all_weights is None: - self._all_weights = list() - self._all_weights.extend(layer.all_weights) - if is_built: - self._built = True - - logging.info( - "LayerList %s including layers [%s]" % (self.name, ', '.join([layer.name for layer in self.layers])) - ) - - # check layer name uniqueness in LayerList - local_layer_name_set = set() - for layer in self.layers: - if layer.name not in local_layer_name_set: - local_layer_name_set.add(layer.name) - else: - raise ValueError( - 'Layer name \'%s\' has already been used by another layer. Please change the layer name.' % - layer.name - ) - - def __getitem__(self, idx): - if isinstance(idx, slice): - return LayerList(list(self.layers)[idx]) - else: - return self.layers[idx] - - def __len__(self): - return len(self.layers) - - def __repr__(self): - tmpstr = 'LayerList' + '(\n' - for idx, layer in enumerate(self.layers): - modstr = layer.__repr__() - modstr = _addindent(modstr, 2) - tmpstr = tmpstr + ' (' + str(idx) + '): ' + modstr + '\n' - - tmpstr = tmpstr + ')' - return tmpstr - - def build(self, inputs_shape): - """ - Build the LayerList. The layer instances will be connected automatically one by one. - """ - in_tensor = self._input_tensors - # in_layer = self._input_layer - for layer in self.layers: - is_build = layer._built - out_tensor = layer(in_tensor) - # nlayer = layer(in_layer) - if is_build is False and layer.all_weights is not None: - if self._all_weights is None: - self._all_weights = list() - self._all_weights.extend(layer.all_weights) - layer._built = True - in_tensor = out_tensor - # in_layer = nlayer - - def forward(self, inputs): - """ - Forward the computation. The computation will go through all layer instances. 
- """ - z = inputs - for layer in self.layers: - z = layer.forward(z) - return z - - def _set_mode_for_layers(self, is_train): - """Set training/evaluation mode for all layer instances.""" - self.is_train = is_train - for layer in self.layers: - if isinstance(layer, ModelLayer): - layer._set_mode_for_layers(is_train) - elif isinstance(layer, LayerList): - layer._set_mode_for_layers(is_train) - else: - layer.is_train = is_train - - def _fix_nodes_for_layers(self): - """ fix LayerNodes to stop growing for this LayerList.""" - self._nodes_fixed = True - for layer in self.layers: - layer._fix_nodes_for_layers() - - def _release_memory(self): - """ - WARINING: This function should be called with great caution. - - self.inputs and self.outputs will be set as None but not deleted. - """ - super(LayerList, self)._release_memory() - for layer in self.layers: - layer._release_memory() - - def get_args(self): - init_args = {} - layers = self.layer_args["layers"] - init_args["layers"] = [layer.config for layer in layers] - init_args.update({"layer_type": "layerlist"}) - return init_args - - -def _addindent(s_, numSpaces): - s = s_.split('\n') - # don't do anything for single-line stuff - if len(s) == 1: - return s_ - first = s.pop(0) - s = [(numSpaces * ' ') + line for line in s] - s = '\n'.join(s) - s = first + '\n' + s - return s - - -def tolist(tensors): - if isinstance(tensors, list) or isinstance(tensors, tuple): - ntensors = list() - for t in tensors: - ntensors += tolist(t) - return ntensors - else: - return [tensors] diff --git a/tensorlayer/layers/core/__init__.py b/tensorlayer/layers/core/__init__.py new file mode 100644 index 000000000..b2898a330 --- /dev/null +++ b/tensorlayer/layers/core/__init__.py @@ -0,0 +1,8 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +from tensorlayer.backend import BACKEND +if BACKEND == 'mindspore': + from .core_mindspore import * +elif BACKEND in ['tensorflow', 'dragon']: + from .core_tensorflow_dragon import * diff --git a/tensorlayer/layers/core/common.py b/tensorlayer/layers/core/common.py new file mode 100644 index 000000000..9a67e20c7 --- /dev/null +++ b/tensorlayer/layers/core/common.py @@ -0,0 +1,34 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import tensorlayer as tl + +_act_dict = { + "relu": tl.ops.ReLU, + "relu6": tl.ops.ReLU6, + "leaky_relu": tl.ops.LeakyReLU, + "lrelu": tl.ops.LeakyReLU, + "softplus": tl.ops.Softplus, + "tanh": tl.ops.Tanh, + "sigmoid": tl.ops.Sigmoid, +} + + +def str2act(act): + if len(act) > 5 and act[0:5] == "lrelu": + try: + alpha = float(act[5:]) + return tl.ops.LeakyReLU(alpha=alpha) + except Exception as e: + raise Exception("{} can not be parsed as a float".format(act[5:])) + + if len(act) > 10 and act[0:10] == "leaky_relu": + try: + alpha = float(act[10:]) + return tl.ops.LeakyReLU(alpha=alpha) + except Exception as e: + raise Exception("{} can not be parsed as a float".format(act[10:])) + + if act not in _act_dict.keys(): + raise Exception("Unsupported act: {}".format(act)) + return _act_dict[act] diff --git a/tensorlayer/layers/core/core_mindspore.py b/tensorlayer/layers/core/core_mindspore.py new file mode 100644 index 000000000..0ff40f1ce --- /dev/null +++ b/tensorlayer/layers/core/core_mindspore.py @@ -0,0 +1,379 @@ +#! 
/usr/bin/python +# -*- coding: utf-8 -*- + +from .common import str2act +from mindspore.nn import Cell +import os +import tensorlayer as tl +from tensorlayer.files import utils +from tensorlayer.layers.utils import (get_variable_with_initializer) +from tensorlayer import logging + +_global_layer_name_dict = {} # TODO: better implementation? + + +class Module(Cell): + + def __init__(self, name=None, act=None, *args, **kwargs): + super().__init__(*args, **kwargs) + + global _global_layer_name_dict + if name is None: + prefix = self.__class__.__name__.lower() + + if _global_layer_name_dict.get(prefix) is not None: + _global_layer_name_dict[prefix] += 1 + name = prefix + '_' + str(_global_layer_name_dict[prefix]) + else: + _global_layer_name_dict[prefix] = 0 + name = prefix + while True: + if _global_layer_name_dict.get(name) is None: + break + _global_layer_name_dict[prefix] += 1 + name = prefix + '_' + str(_global_layer_name_dict[prefix]) + else: + if _global_layer_name_dict.get(name) is not None: + pass + else: + _global_layer_name_dict[name] = 0 + + self.name = name + if isinstance(act, str): + self.act = str2act(act) + else: + if act: + self.act = act() + else: + self.act = act + + # Layer building state + self._built = False + + # Layer nodes state + self._nodes = [] + self._nodes_fixed = False + + # Layer weight state + self._all_weights = [] + self._trainable_weights = [] + self._nontrainable_weights = [] + + # Layer training state + self.is_train = True + + def forward(self, *inputs, **kwargs): + raise Exception("The forward method must be implemented by inherited class") + + def construct(self, *inputs, **kwargs): + return self.forward(*inputs, **kwargs) + + def build(self, inputs_shape): + raise Exception("The build(self, inputs_shape) method must be implemented by inherited class") + + def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), trainable=True): + """ Get trainable variables. """ + weight = get_variable_with_initializer( + scope_name=self.name, var_name=var_name, shape=shape, init=init, trainable=trainable + ) + self.trainable = trainable + return weight + + def save_weights(self, file_path, format=None): + """Input file_path, save model weights into a file of given format. + Use self.load_weights() to restore. + + Parameters + ---------- + file_path : str + Filename to which the model weights will be saved. + format : str or None + Saved file format. + Value should be None, 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now. + 1) If this is set to None, then the postfix of file_path will be used to decide saved format. + If the postfix is not in ['h5', 'hdf5', 'npz', 'ckpt'], then file will be saved in hdf5 format by default. + 2) 'hdf5' will save model weights name in a list and each layer has its weights stored in a group of + the hdf5 file. + 3) 'npz' will save model weights sequentially into a npz file. + 4) 'npz_dict' will save model weights along with its name as a dict into a npz file. + 5) 'ckpt' will save model weights into a tensorflow ckpt file. + + Default None. + + Examples + -------- + 1) Save model weights in hdf5 format by default. + >>> net = vgg16() + >>> net.save_weights('./model.h5') + ... 
+        >>> net.load_weights('./model.h5')
+
+        2) Save model weights in npz/npz_dict format
+        >>> net = vgg16()
+        >>> net.save_weights('./model.npz')
+        >>> net.save_weights('./model.npz', format='npz_dict')
+
+        """
+
+        # self.all_weights = self.network.all_weights
+        if self.all_weights is None or len(self.all_weights) == 0:
+            logging.warning("Model contains no weights or layers haven't been built, nothing will be saved")
+            return
+
+        if format is None:
+            postfix = file_path.split('.')[-1]
+            if postfix in ['h5', 'hdf5', 'npz', 'ckpt']:
+                format = postfix
+            else:
+                format = 'hdf5'
+
+        if format == 'hdf5' or format == 'h5':
+            utils.save_weights_to_hdf5(file_path, self)
+        elif format == 'npz':
+            utils.save_npz(self.all_weights, file_path)
+        elif format == 'npz_dict':
+            utils.save_npz_dict(self.all_weights, file_path)
+        elif format == 'ckpt':
+            # TODO: enable this when tf save ckpt is enabled
+            raise NotImplementedError("ckpt load/save is not supported now.")
+        else:
+            raise ValueError(
+                "Save format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. "
+                "Other format is not supported now."
+            )
+
+    def load_weights(self, file_path, format=None, in_order=True, skip=False):
+        """Load model weights from a given file, which should be previously saved by self.save_weights().
+
+        Parameters
+        ----------
+        file_path : str
+            Filename from which the model weights will be loaded.
+        format : str or None
+            If not specified (None), the postfix of the file_path will be used to decide its format. If specified,
+            value should be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now.
+            In addition, it should be the same format when you saved the file using self.save_weights().
+            Default is None.
+        in_order : bool
+            Allow loading weights into model in a sequential way or by name. Only useful when 'format' is 'hdf5'.
+            If 'in_order' is True, weights from the file will be loaded into model in a sequential way.
+            If 'in_order' is False, weights from the file will be loaded into model by matching the name
+            with the weights of the model, particularly useful when trying to restore a model in eager(graph) mode from
+            a weights file which is saved in graph(eager) mode.
+            Default is True.
+        skip : bool
+            Allow skipping weights whose name is mismatched between the file and model. Only useful when 'format' is
+            'hdf5' or 'npz_dict'. If 'skip' is True, 'in_order' argument will be ignored and those loaded weights
+            whose name is not found in model weights (self.all_weights) will be skipped. If 'skip' is False, an error
+            will occur when a mismatch is found.
+            Default is False.
+
+        Examples
+        --------
+        1) load model from a hdf5 file.
+        >>> net = vgg16()
+        >>> net.load_weights('./model_graph.h5', in_order=False, skip=True)  # load weights by name, skipping mismatch
+        >>> net.load_weights('./model_eager.h5')  # load sequentially
+
+        2) load model from a npz file
+        >>> net.load_weights('./model.npz')
+
+        3) load model from a npz file, which was saved as npz_dict previously
+        >>> net.load_weights('./model.npz', format='npz_dict')
+
+        Notes
+        -------
+        1) 'in_order' is only useful when 'format' is 'hdf5'. If you are trying to load a weights file which is
+        saved in a different mode, it is recommended to set 'in_order' to False (load by name).
+        2) 'skip' is useful when 'format' is 'hdf5' or 'npz_dict'. If 'skip' is True,
+        'in_order' argument will be ignored.
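+        3) Both the '.h5' and '.hdf5' postfixes are treated as the hdf5 format, so, as an
+        illustrative sketch, the following two calls are expected to load the same way.
+        >>> net.load_weights('./model.h5')
+        >>> net.load_weights('./model.hdf5')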
+
+        """
+        if not os.path.exists(file_path):
+            raise FileNotFoundError("file {} doesn't exist.".format(file_path))
+
+        if format is None:
+            format = file_path.split('.')[-1]
+
+        if format == 'hdf5' or format == 'h5':
+            if skip or not in_order:
+                # load by weights name
+                utils.load_hdf5_to_weights(file_path, self, skip)
+            else:
+                # load in order
+                utils.load_hdf5_to_weights_in_order(file_path, self)
+        elif format == 'npz':
+            utils.load_and_assign_npz(file_path, self)
+        elif format == 'npz_dict':
+            utils.load_and_assign_npz_dict(file_path, self, skip)
+        elif format == 'ckpt':
+            # TODO: enable this when tf save ckpt is enabled
+            raise NotImplementedError("ckpt load/save is not supported now.")
+        else:
+            raise ValueError(
+                "File format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. "
+                "Other format is not supported now."
+            )
+
+    @staticmethod
+    def _compute_shape(tensors):
+        if isinstance(tensors, list):
+            shape_mem = [tl.get_tensor_shape(t) for t in tensors]
+        else:
+            shape_mem = tl.get_tensor_shape(tensors)
+        return shape_mem
+
+    def _add_node(self, input_tensors, output_tensors):
+        """Add a LayerNode for this layer given input_tensors, output_tensors.
+
+        WARNING: This function should not be called from outside, it should only be called
+        in layer.__call__ when building a static model.
+
+        Parameters
+        ----------
+        input_tensors : Tensor or a list of tensors
+            Input tensors to this layer.
+        output_tensors : Tensor or a list of tensors
+            Output tensors to this layer.
+
+        """
+        raise NotImplementedError
+
+    def set_train(self):
+        """
+        Sets the cell to training mode.
+
+        The cell itself and all children cells will be set to training mode.
+        """
+        self._phase = 'train'
+        self.add_flags_recursive(training=True)
+        return self
+
+    def eval(self):
+        """Set this network in evaluation mode. After calling this method,
+        all layers in the network are in evaluation mode, in particular, BatchNorm, Dropout, etc.
+
+        Examples
+        --------
+        >>> import tensorlayer as tl
+        >>> net = tl.models.vgg16()
+        >>> net.eval()
+        # do evaluation
+
+        """
+        self._phase = 'predict'
+        self.add_flags_recursive(training=False)
+        return self
+
+    @property
+    def trainable_weights(self):
+        """
+        Returns a list of all trainable parameters, including those of sublayers.
+
+        Returns:
+            List, the list of trainable weights.
+        """
+        return list(filter(lambda x: x.requires_grad, self.get_parameters(expand=True)))
+
+    @property
+    def nontrainable_weights(self):
+        """
+        Returns a list of all non-trainable parameters, including those of sublayers.
+
+        Returns:
+            List, the list of non-trainable weights.
+        """
+        return list(filter(lambda x: not x.requires_grad, self.get_parameters(expand=True)))
+
+    @property
+    def all_weights(self):
+        return list(filter(lambda x: x.requires_grad, self.get_parameters(expand=True))) \
+               + list(filter(lambda x: not x.requires_grad, self.get_parameters(expand=True)))
+
+
+class LayerNode(object):
+    """
+    The :class:`LayerNode` class represents a conceptual node for a layer.
+
+    LayerNode is used for building a static model and it is actually a lightweight
+    wrapper over Layer. Specifically, it is used for building a static computational graph
+    (see _construct_graph() in tl.models.Model).
In static model, each layer relates to + one or more LayerNode, and the connection relationship between layers is built upon + LayerNode. In addition, LayerNode eases layer reuse and weights sharing. + + Parameters + ---------- + layer : tl.layers.Layer + A tl layer that wants to create a node. + node_index : int + Index of this node in layer._nodes. + in_nodes :a list of LayerNode + Father nodes to this node. + in_tensors : a list of tensors + Input tensors to this node. + out_tensors : a list of tensors + Output tensors to this node. + in_tensor_idxes : a list of int + Indexes of each input tensor in its corresponding node's out_tensors. + + Methods + --------- + __init__() + Initializing the LayerNode. + __call__() + (1) Forwarding through the layer. (2) Update its input/output tensors. + """ + + def __init__(self, layer, node_index, in_nodes, in_tensors, out_tensors, in_tensor_idxes): + """ + + Parameters + ---------- + layer + node_index + in_nodes + in_tensors + out_tensors + in_tensor_idxes + """ + self.layer = layer + self.node_index = node_index + self.in_nodes = in_nodes + self.out_nodes = [] + self.in_tensors = in_tensors + self.out_tensors = out_tensors + self.name = layer.name + "_node_{}".format(node_index) + + self.in_tensors_idxes = in_tensor_idxes + + self.visited = False + + def __call__(self, inputs, **kwargs): + """(1) Forwarding through the layer. (2) Update its input/output tensors.""" + outputs = self.layer.forward(inputs, **kwargs) + self.in_tensors = tolist(inputs) + self.out_tensors = tolist(outputs) + return self.out_tensors + + +def tolist(tensors): + if isinstance(tensors, list) or isinstance(tensors, tuple): + ntensors = list() + for t in tensors: + ntensors += tolist(t) + return ntensors + else: + return [tensors] diff --git a/tensorlayer/layers/core/core_tensorflow_dragon.py b/tensorlayer/layers/core/core_tensorflow_dragon.py new file mode 100644 index 000000000..4078c87df --- /dev/null +++ b/tensorlayer/layers/core/core_tensorflow_dragon.py @@ -0,0 +1,666 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- +from .common import str2act +from tensorlayer.backend.ops.load_backend import BACKEND +from collections import OrderedDict +import time, os +import tensorlayer as tl +from tensorlayer.decorators import (protected_method) +from tensorlayer.files import utils +from tensorlayer.layers.utils import (get_variable_with_initializer) +from tensorlayer import logging + +_global_layer_name_dict = {} # TODO: better implementation? 
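+
+# Which parameter class backs Module below is decided once, at import time, by the
+# backend chosen in tensorlayer.backend.ops.load_backend. A minimal illustrative
+# sketch, assuming load_backend reads the TL_BACKEND environment variable (this env
+# var name is an assumption here; it must be set before tensorlayer is first imported):
+#
+#     import os
+#     os.environ['TL_BACKEND'] = 'dragon'  # or 'tensorflow'
+#     import tensorlayer as tl             # Parameter_ below then becomes dg.Tensor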
+
+if BACKEND == 'tensorflow':
+    import tensorflow as tf
+    Parameter_ = tf.Variable
+elif BACKEND == 'dragon':
+    import dragon as dg
+    Parameter_ = dg.Tensor  # TODO: the dragon parameter is an initializer
+else:
+    raise NotImplementedError("This backend is not supported")
+
+
+class Module(object):
+
+    def __init__(self, name=None, act=None, *args, **kwargs):
+        self._params = OrderedDict()
+        self._layers = OrderedDict()
+        self._params_status = OrderedDict()
+        self._parameter_layout_dict = {}
+        self._create_time = int(time.time() * 1e9)
+
+        global _global_layer_name_dict
+        if name is None:
+            prefix = self.__class__.__name__.lower()
+
+            if _global_layer_name_dict.get(prefix) is not None:
+                _global_layer_name_dict[prefix] += 1
+                name = prefix + '_' + str(_global_layer_name_dict[prefix])
+            else:
+                _global_layer_name_dict[prefix] = 0
+                name = prefix
+            while True:
+                if _global_layer_name_dict.get(name) is None:
+                    break
+                _global_layer_name_dict[prefix] += 1
+                name = prefix + '_' + str(_global_layer_name_dict[prefix])
+        else:
+            if _global_layer_name_dict.get(name) is not None:
+                pass
+            else:
+                _global_layer_name_dict[name] = 0
+
+        self.name = name
+        if isinstance(act, str):
+            self.act = str2act(act)
+        else:
+            if act:
+                self.act = act()
+            else:
+                self.act = act
+
+        # Layer building state
+        self._built = False
+
+        # Layer nodes state
+        self._nodes = []
+        self._nodes_fixed = False
+
+        # Layer weight state
+        self._all_weights = []
+        self._trainable_weights = []
+        self._nontrainable_weights = []
+
+        # nested layers
+        # self._layers = None
+
+        # Layer training state
+        self.is_train = True
+
+    def extend_repr(self):
+        """
+        Sets the extended representation of the Cell.
+
+        To print customized extended information, re-implement this method in your own cells.
+        """
+        return ''
+
+    def __repr__(self):
+        extra_str = self.extend_repr()
+        info_str = self.__class__.__name__ + '<'
+        if self._layers:
+            sub_str = '\n'
+            if extra_str:
+                sub_str += '{}\n'.format(self.extend_repr())
+            for key, value in self._layers.items():
+                sub_str += '({}): {}\n'.format(key, repr(value))
+            sub_str = sub_str.replace('\n', '\n ') + '>'
+            info_str += sub_str
+        else:
+            info_str += extra_str + '>'
+        return info_str
+
+    def __setattr__(self, name, value):
+        layers = self.__dict__.get('_layers')
+        params = self.__dict__.get('_params')
+
+        if isinstance(value, Parameter_):
+            if params is None:
+                raise AttributeError("Can not assign params before Module.__init__() call.")
+            if name in self.__dict__:
+                if self.__dict__[name] is not None:
+                    raise TypeError("Expected type is not in (Parameter, Module), but got Parameter.")
+                del self.__dict__[name]
+            if layers and name in layers:
+                raise TypeError("Expected type is Module, but got Parameter.")
+            self.insert_param_to_layer(name, value)
+
+        elif isinstance(value, Module):
+            if layers is None:
+                raise AttributeError("Can not assign layers before Module.__init__() call.")
+            if name in self.__dict__:
+                del self.__dict__[name]
+            if params and name in params:
+                raise TypeError("Expected type is Parameter, but got Module.")
+            if value._built is False:
+                raise AttributeError(
+                    "The registered layer `{}` should be built in advance. "
+                    "Did you forget to pass the keyword argument 'in_channels'? ".format(value.name)
+                )
+            layers[name] = value
+        else:
+            object.__setattr__(self, name, value)
+
+    def __call__(self, *inputs, **kwargs):
+        if BACKEND in ['tensorflow', 'dragon']:
+            output = self.forward(*inputs)
+        else:
+            raise NotImplementedError("Unsupported backend: {}".format(BACKEND))
+        return output
+
+    def forward(self, *inputs, **kwargs):
+        raise Exception("The forward method must be implemented by inherited class")
+
+    def build(self, inputs_shape):
+        raise Exception("The build(self, inputs_shape) method must be implemented by inherited class")
+
+    def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), trainable=True):
+        """ Get trainable variables. """
+        weight = get_variable_with_initializer(
+            scope_name=self.name, var_name=var_name, shape=shape, init=init, trainable=trainable
+        )
+        self.trainable = trainable
+        return weight
+
+    def save_weights(self, file_path, format=None):
+        """Input file_path, save model weights into a file of given format.
+           Use self.load_weights() to restore.
+
+        Parameters
+        ----------
+        file_path : str
+            Filename to which the model weights will be saved.
+        format : str or None
+            Saved file format.
+            Value should be None, 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now.
+            1) If this is set to None, then the postfix of file_path will be used to decide saved format.
+            If the postfix is not in ['h5', 'hdf5', 'npz', 'ckpt'], then file will be saved in hdf5 format by default.
+            2) 'hdf5' will save model weights name in a list and each layer has its weights stored in a group of
+            the hdf5 file.
+            3) 'npz' will save model weights sequentially into a npz file.
+            4) 'npz_dict' will save model weights along with its name as a dict into a npz file.
+            5) 'ckpt' will save model weights into a tensorflow ckpt file.
+
+            Default None.
+
+        Examples
+        --------
+        1) Save model weights in hdf5 format by default.
+        >>> net = vgg16()
+        >>> net.save_weights('./model.h5')
+        ...
+        >>> net.load_weights('./model.h5')
+
+        2) Save model weights in npz/npz_dict format
+        >>> net = vgg16()
+        >>> net.save_weights('./model.npz')
+        >>> net.save_weights('./model.npz', format='npz_dict')
+
+        """
+
+        # self.all_weights = self.network.all_weights
+        if self.all_weights is None or len(self.all_weights) == 0:
+            logging.warning("Model contains no weights or layers haven't been built, nothing will be saved")
+            return
+
+        if format is None:
+            postfix = file_path.split('.')[-1]
+            if postfix in ['h5', 'hdf5', 'npz', 'ckpt']:
+                format = postfix
+            else:
+                format = 'hdf5'
+
+        if format == 'hdf5' or format == 'h5':
+            utils.save_weights_to_hdf5(file_path, self)
+        elif format == 'npz':
+            utils.save_npz(self.all_weights, file_path)
+        elif format == 'npz_dict':
+            utils.save_npz_dict(self.all_weights, file_path)
+        elif format == 'ckpt':
+            # TODO: enable this when tf save ckpt is enabled
+            raise NotImplementedError("ckpt load/save is not supported now.")
+        else:
+            raise ValueError(
+                "Save format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. "
+                "Other format is not supported now."
+            )
+
+    def load_weights(self, file_path, format=None, in_order=True, skip=False):
+        """Load model weights from a given file, which should be previously saved by self.save_weights().
+
+        Parameters
+        ----------
+        file_path : str
+            Filename from which the model weights will be loaded.
+        format : str or None
+            If not specified (None), the postfix of the file_path will be used to decide its format. If specified,
+            value should be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now.
+            In addition, it should match the format used when the file was saved by self.save_weights().
+            Default is None.
+        in_order : bool
+            Allow loading weights into the model sequentially or by name. Only useful when 'format' is 'hdf5'.
+            If 'in_order' is True, weights from the file will be loaded into the model in a sequential way.
+            If 'in_order' is False, weights from the file will be loaded into the model by matching the name
+            with the weights of the model, which is particularly useful when trying to restore a model in
+            eager(graph) mode from a weights file saved in graph(eager) mode.
+            Default is True.
+        skip : bool
+            Allow skipping weights whose name is mismatched between the file and the model. Only useful when
+            'format' is 'hdf5' or 'npz_dict'. If 'skip' is True, the 'in_order' argument will be ignored and loaded
+            weights whose name is not found in the model weights (self.all_weights) will be skipped. If 'skip' is
+            False, an error will occur when a mismatch is found.
+            Default is False.
+
+        Examples
+        --------
+        1) Load model from an hdf5 file.
+        >>> net = vgg16()
+        >>> net.load_weights('./model_graph.h5', in_order=False, skip=True) # load weights by name, skipping mismatch
+        >>> net.load_weights('./model_eager.h5') # load sequentially
+
+        2) Load model from a npz file
+        >>> net.load_weights('./model.npz')
+
+        3) Load model from a npz file, which was saved as npz_dict previously
+        >>> net.load_weights('./model.npz', format='npz_dict')
+
+        Notes
+        -------
+        1) 'in_order' is only useful when 'format' is 'hdf5'. If you are trying to load a weights file which was
+           saved in a different mode, it is recommended to set 'in_order' to False so that weights are matched by name.
+        2) 'skip' is useful when 'format' is 'hdf5' or 'npz_dict'. If 'skip' is True,
+           the 'in_order' argument will be ignored.
+
+        """
+        if not os.path.exists(file_path):
+            raise FileNotFoundError("file {} doesn't exist.".format(file_path))
+
+        if format is None:
+            format = file_path.split('.')[-1]
+
+        if format == 'hdf5' or format == 'h5':
+            if skip or not in_order:
+                # load by weights name
+                utils.load_hdf5_to_weights(file_path, self, skip)
+            else:
+                # load in order
+                utils.load_hdf5_to_weights_in_order(file_path, self)
+        elif format == 'npz':
+            utils.load_and_assign_npz(file_path, self)
+        elif format == 'npz_dict':
+            utils.load_and_assign_npz_dict(file_path, self, skip)
+        elif format == 'ckpt':
+            # TODO: enable this when tf save ckpt is enabled
+            raise NotImplementedError("ckpt load/save is not supported now.")
+        else:
+            raise ValueError(
+                "File format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. "
+                "Other formats are not supported now."
+            )
+
+    def _set_mode_for_layers(self, is_train):
+        """Set all layers of this network to a given mode.
+
+        Parameters
+        ----------
+        is_train : boolean
+            Network's mode. True means training mode while False means evaluation mode.
+
+        """
+        layers = self.layers_and_names(name_prefix='')
+        for layer_name, layer in layers:
+            if isinstance(layer, Module):
+                layer.is_train = is_train
+
+    def set_train(self):
+        """Set this network in training mode. After calling this method,
+        all layers of the network are in training mode, in particular, BatchNorm, Dropout, etc.
+
+        Examples
+        --------
+        >>> import tensorlayer as tl
+        >>> net = tl.vgg16()
+        >>> net.set_train()
+
+        """
+        if not self.is_train:
+            self.is_train = True
+            self._set_mode_for_layers(True)
+
+    def eval(self):
+        """Set this network in evaluation mode. After calling this method,
+        all layers of the network are in evaluation mode, in particular, BatchNorm, Dropout, etc.
+
+        Examples
+        --------
+        >>> import tensorlayer as tl
+        >>> net = tl.vgg16()
+        >>> net.eval()
+        # do evaluation
+
+        """
+        if self.is_train:
+            self.is_train = False
+            self._set_mode_for_layers(False)
+
+    @staticmethod
+    def _compute_shape(tensors):
+        if isinstance(tensors, list):
+            shape_mem = [tl.get_tensor_shape(t) for t in tensors]
+        else:
+            shape_mem = tl.get_tensor_shape(tensors)
+        return shape_mem
+
+    def insert_param_to_layer(self, param_name, param, check_name=True):
+        """
+        Adds a parameter to the current layer.
+
+        Inserts a parameter with the given name into the layer. Please refer to the usage in
+        the source code of `tensorlayer.layer.Module.__setattr__`.
+
+        Args:
+            param_name (str): Name of the parameter.
+            param (Parameter): Parameter to be inserted into the layer.
+            check_name (bool): Determines whether the name input is compatible. Default: True.
+
+        Raises:
+            KeyError: If the name of the parameter is null or contains a dot.
+            AttributeError: If Module.__init__() was not called first.
+            TypeError: If the type of the parameter is not Parameter.
+        """
+        if not param_name:
+            raise KeyError("The name of parameter should not be null.")
+        if check_name and '.' in param_name:
+            raise KeyError("The name of parameter should not contain \".\"")
+        if '_params' not in self.__dict__:
+            raise AttributeError("You need to call Module.__init__() first.")
+        if hasattr(self, param_name) and param_name not in self._params:
+            raise KeyError("Duplicated parameter name '{}'.".format(param_name))
+        if not isinstance(param, Parameter_) and param is not None:
+            raise TypeError("The type of parameter should be 'Parameter' if not None.")
+        self._params[param_name] = param
+        # 'trainable' is set by _get_weights(); default to True if no weight
+        # has been created yet.
+        self._params_status[param_name] = getattr(self, 'trainable', True)
+
+    def _add_node(self, input_tensors, output_tensors):
+        """Add a LayerNode for this layer given input_tensors, output_tensors.
+
+        WARNING: This function should not be called from outside. It should only be called
+        in layer.__call__ when building a static model.
+
+        Parameters
+        ----------
+        input_tensors : Tensor or a list of tensors
+            Input tensors to this layer.
+        output_tensors : Tensor or a list of tensors
+            Output tensors to this layer.
+
+        """
+        raise NotImplementedError
+
+    @property
+    def create_time(self):
+        return self._create_time
+
+    def __getattr__(self, name):
+        if '_params' in self.__dict__:
+            params = self.__dict__['_params']
+            if name in params:
+                return params[name]
+        if '_layers' in self.__dict__:
+            layers = self.__dict__['_layers']
+            if name in layers:
+                return layers[name]
+        if '_params_status' in self.__dict__:
+            params_status = self.__dict__['_params_status']
+            if name in params_status:
+                return params_status[name]
+        raise AttributeError("'{}' object has no attribute '{}'.".format(type(self).__name__, name))
+
+    def __delattr__(self, name):
+        if name in self._params:
+            del self._params[name]
+        elif name in self._layers:
+            del self._layers[name]
+        else:
+            object.__delattr__(self, name)
+
+    @property
+    def trainable_weights(self):
+        """
+        Returns a list of all trainable parameters.
+
+        Returns:
+            List, the list of trainable weights.
+ """ + self.get_weights() + layers = self.layers_and_names(name_prefix='') + for layer_name, layer in layers: + params = layer._params.items() + params_status = layer._params_status.items() + params_zip = zip(params, params_status) + for params, params_status in params_zip: + if params_status[1] ==True: + self._trainable_weights.append(params[1]) + return self._trainable_weights + + @property + def nontrainable_weights(self): + """ + Returns all untrainable weights. + + Returns a list of all untrainable weights. + + Args: + recurse (bool): Whether contains the untrainable weights of sublayers. Default: True. + + Returns: + List, the list of untrainable weights. + """ + layers = self.layers_and_names(name_prefix='') + for layer_name, layer in layers: + params = layer._params.items() + params_status = layer._params_status.items() + params_zip = zip(params, params_status) + for params, params_status in params_zip: + if params_status[1] == False: + self._nontrainable_weights.append(params[1]) + return self._nontrainable_weights + + @property + def all_weights(self): + layers = self.layers_and_names(name_prefix='') + for layer_name, layer in layers: + params = layer._params.items() + for par, val in params: + self._all_weights.append(val) + return self._all_weights + + def get_weights(self, expand=True): + """ + Returns an iterator over layer weights. + + Yields weights of this layer. If `expand` is True, yield parameters of this layer and all sublayers. + + Args: + expand (bool): If True, yields parameters of this layer and all sublayers. Otherwise, yields only parameters + that are direct members of this layer. Default: True. + + Examples: + >>> net = Net() + >>> for item in net.get_weights(): + >>> print(item) + """ + for _, param in self.parameters_and_names(expand=expand): + yield param + + def check_names(self): + names = set("") + for value, param in self.parameters_and_names(): + if param.name in names: + raise ValueError( + "The value of {} is {}, its name '{}' already exists.".format(value, param, param.name) + ) + names.add(param.name) + + def parameters_and_names(self, name_prefix='', expand=True): + """ + Returns an iterator over layer parameters. + + Includes the parameter's name and itself. + + Args: + name_prefix (str): Namespace. Default: ''. + expand (bool): If True, yields parameters of this layer and all sublayers. Otherwise, yields only parameters + that are direct members of this layer. Default: True. + + Examples: + >>> n = Net() + >>> names = [] + >>> for m in n.parameters_and_names(): + >>> if m[0]: + >>> names.append(m[0]) + """ + layers = [] + if expand: + layers = self.layers_and_names(name_prefix=name_prefix) + else: + layers.append((name_prefix, self)) + + params_set = set() + for layer_name, layer in layers: + params = layer._params.items() + for par_name, par in params: + if par.inited_param is not None: + par = par.inited_param + if par is not None and id(par) not in params_set: + params_set.add(id(par)) + par_new_name = par_name + if layer_name: + par_new_name = layer_name + '.' + par_new_name + + yield par_new_name, par + + def layers_and_names(self, layers=None, name_prefix=''): + """ + Returns an iterator over all layers in the network. + + Includes the layer's name and itself. + + Args: + layers (str): layers to iterate over. Default: None. + name_prefix (str): Namespace. Default: ''. 
+ + Examples: + >>> n = Net() + >>> names = [] + >>> for m in n.layers_and_names(): + >>> if m[0]: + >>> names.append(m[0]) + """ + t_layers = layers if layers else set() + if self in t_layers: + return + + t_layers.add(self) + yield name_prefix, self + + for name, layer in self._layers.items(): + if layer: + layers_name_prefix = name + if name_prefix: + layers_name_prefix = name_prefix + '.' + layers_name_prefix + for ele in layer.layers_and_names(t_layers, layers_name_prefix): + yield ele + + def layers(self): + """Returns an iterator over immediate layers.""" + return self.name_layers().values() + + def name_layers(self): + """ + Returns an iterator over all layers in the network. + + Include name of the layer and layer itself. + """ + value_set = set() + layers = OrderedDict() + for name, layer in self._layers.items(): + if layer is not None and layer not in value_set: + value_set.add(layer) + layers[name] = layer + return layers + + +class LayerNode(object): + """ + The class :class:`LayerNode` class represents a conceptional node for a layer. + + LayerNode is used for building static model and it is actually a light weighted + wrapper over Layer. Specifically, it is used for building static computational graph + (see _construct_graph() in tl.models.Model). In static model, each layer relates to + one or more LayerNode, and the connection relationship between layers is built upon + LayerNode. In addition, LayerNode eases layer reuse and weights sharing. + + Parameters + ---------- + layer : tl.layers.Layer + A tl layer that wants to create a node. + node_index : int + Index of this node in layer._nodes. + in_nodes :a list of LayerNode + Father nodes to this node. + in_tensors : a list of tensors + Input tensors to this node. + out_tensors : a list of tensors + Output tensors to this node. + in_tensor_idxes : a list of int + Indexes of each input tensor in its corresponding node's out_tensors. + + Methods + --------- + __init__() + Initializing the LayerNode. + __call__() + (1) Forwarding through the layer. (2) Update its input/output tensors. + """ + + def __init__(self, layer, node_index, in_nodes, in_tensors, out_tensors, in_tensor_idxes): + """ + + Parameters + ---------- + layer + node_index + in_nodes + in_tensors + out_tensors + in_tensor_idxes + """ + self.layer = layer + self.node_index = node_index + self.in_nodes = in_nodes + self.out_nodes = [] + self.in_tensors = in_tensors + self.out_tensors = out_tensors + self.name = layer.name + "_node_{}".format(node_index) + + self.in_tensors_idxes = in_tensor_idxes + + self.visited = False + + def __call__(self, inputs, **kwargs): + """(1) Forwarding through the layer. (2) Update its input/output tensors.""" + outputs = self.layer.forward(inputs, **kwargs) + self.in_tensors = tolist(inputs) + self.out_tensors = tolist(outputs) + return self.out_tensors + + +def tolist(tensors): + if isinstance(tensors, list) or isinstance(tensors, tuple): + ntensors = list() + for t in tensors: + ntensors += tolist(t) + return ntensors + else: + return [tensors] diff --git a/tensorlayer/layers/dense/__init__.py b/tensorlayer/layers/dense/__init__.py index 557fbd070..c39d8f36b 100644 --- a/tensorlayer/layers/dense/__init__.py +++ b/tensorlayer/layers/dense/__init__.py @@ -5,24 +5,24 @@ various benchmarks and domain-specific problems. In addition, we also support transparent access to native TensorFlow parameters. 
For example, we provide not only layers for local response normalization, but also -layers that allow user to apply ``tf.nn.lrn`` on ``network.outputs``. +layers that allow user to apply ``tf.ops.lrn`` on ``network.outputs``. More functions can be found in `TensorFlow API `__. """ from .base_dense import * -from .binary_dense import * -from .dorefa_dense import * +# from .binary_dense import * +# from .dorefa_dense import * from .dropconnect import * from .quan_dense import * -from .quan_dense_bn import * -from .ternary_dense import * +# from .quan_dense_bn import * +# from .ternary_dense import * __all__ = [ - 'BinaryDense', + # 'BinaryDense', 'Dense', - 'DorefaDense', + # 'DorefaDense', 'DropconnectDense', - 'TernaryDense', + # 'TernaryDense', 'QuanDense', - 'QuanDenseWithBN', + # 'QuanDenseWithBN', ] diff --git a/tensorlayer/layers/dense/base_dense.py b/tensorlayer/layers/dense/base_dense.py index c24080432..2092cf39e 100644 --- a/tensorlayer/layers/dense/base_dense.py +++ b/tensorlayer/layers/dense/base_dense.py @@ -1,22 +1,16 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -import numpy as np -import tensorflow as tf - import tensorlayer as tl from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer - -# from tensorlayer.layers.core import LayersConfig +from tensorlayer.layers.core import Module __all__ = [ 'Dense', ] -class Dense(Layer): +class Dense(Module): """The :class:`Dense` class is a fully connected layer. Parameters @@ -40,10 +34,10 @@ class Dense(Layer): With TensorLayer >>> net = tl.layers.Input([100, 50], name='input') - >>> dense = tl.layers.Dense(n_units=800, act=tf.nn.relu, in_channels=50, name='dense_1') + >>> dense = tl.layers.Dense(n_units=800, act=tl.ops.relu, in_channels=50, name='dense_1') >>> print(dense) Dense(n_units=800, relu, in_channels='50', name='dense_1') - >>> tensor = tl.layers.Dense(n_units=800, act=tf.nn.relu, name='dense_2')(net) + >>> tensor = tl.layers.Dense(n_units=800, act=tl.ops.relu, name='dense_2')(net) >>> print(tensor) tf.Tensor([...], shape=(100, 800), dtype=float32) @@ -53,6 +47,7 @@ class Dense(Layer): """ + # @cell_attr_register def __init__( self, n_units, @@ -76,11 +71,11 @@ def __init__( logging.info( "Dense %s: %d %s" % - (self.name, self.n_units, self.act.__name__ if self.act is not None else 'No Activation') + (self.name, self.n_units, self.act.__class__.__name__ if self.act is not None else 'No Activation') ) def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' + actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' s = ('{classname}(n_units={n_units}, ' + actstr) if self.in_channels is not None: s += ', in_channels=\'{in_channels}\'' @@ -97,15 +92,25 @@ def build(self, inputs_shape): else: self.in_channels = inputs_shape[1] shape = [inputs_shape[1], self.n_units] + self.W = self._get_weights("weights", shape=tuple(shape), init=self.W_init) + + self.b_init_flag = False if self.b_init: self.b = self._get_weights("biases", shape=(self.n_units, ), init=self.b_init) + self.b_init_flag = True + self.bias_add = tl.ops.BiasAdd() - # @tf.function - def forward(self, inputs): - z = tf.matmul(inputs, self.W) - if self.b_init: - z = tf.add(z, self.b) + self.act_init_flag = False if self.act: + self.act_init_flag = True + + self.matmul = tl.ops.MatMul() + + def forward(self, inputs): + z = self.matmul(inputs, self.W) + if self.b_init_flag: + z = self.bias_add(z, self.b) + if self.act_init_flag: z = 
self.act(z) return z diff --git a/tensorlayer/layers/dense/binary_dense.py b/tensorlayer/layers/dense/binary_dense.py deleted file mode 100644 index d4d152ac0..000000000 --- a/tensorlayer/layers/dense/binary_dense.py +++ /dev/null @@ -1,106 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer -from tensorlayer.layers.utils import quantize - -__all__ = [ - 'BinaryDense', -] - - -class BinaryDense(Layer): - """The :class:`BinaryDense` class is a binary fully connected layer, which weights are either -1 or 1 while inferencing. - - Note that, the bias vector would not be binarized. - - Parameters - ---------- - n_units : int - The number of units of this layer. - act : activation function - The activation function of this layer, usually set to ``tf.act.sign`` or apply :class:`Sign` after :class:`BatchNorm`. - use_gemm : boolean - If True, use gemm instead of ``tf.matmul`` for inference. (TODO). - W_init : initializer - The initializer for the weight matrix. - b_init : initializer or None - The initializer for the bias vector. If None, skip biases. - in_channels: int - The number of channels of the previous layer. - If None, it will be automatically detected when the layer is forwarded for the first time. - name : None or str - A unique layer name. - - """ - - def __init__( - self, - n_units=100, - act=None, - use_gemm=False, - W_init=tl.initializers.truncated_normal(stddev=0.05), - b_init=tl.initializers.constant(value=0.0), - in_channels=None, - name=None, #'binary_dense', - ): - super().__init__(name, act=act) - self.n_units = n_units - self.use_gemm = use_gemm - self.W_init = W_init - self.b_init = b_init - self.in_channels = in_channels - - if self.in_channels is not None: - self.build((None, self.in_channels)) - self._built = True - - logging.info( - "BinaryDense %s: %d %s" % - (self.name, n_units, self.act.__name__ if self.act is not None else 'No Activation') - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ('{classname}(n_units={n_units}, ' + actstr) - if self.in_channels is not None: - s += ', in_channels=\'{in_channels}\'' - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - if len(inputs_shape) != 2: - raise Exception("The input dimension must be rank 2, please reshape or flatten it") - - if self.in_channels is None: - self.in_channels = inputs_shape[1] - - if self.use_gemm: - raise Exception("TODO. The current version use tf.matmul for inferencing.") - - n_in = inputs_shape[-1] - self.W = self._get_weights("weights", shape=(n_in, self.n_units), init=self.W_init) - if self.b_init is not None: - self.b = self._get_weights("biases", shape=(self.n_units), init=self.b_init) - - def forward(self, inputs): - # W = tl.act.sign(W) # dont update ... 
- W_ = quantize(self.W) - # W = tf.Variable(W) - - outputs = tf.matmul(inputs, W_) - # self.outputs = xnor_gemm(self.inputs, W) # TODO - - if self.b_init is not None: - outputs = tf.nn.bias_add(outputs, self.b, name='bias_add') - - if self.act: - outputs = self.act(outputs) - return outputs diff --git a/tensorlayer/layers/dense/dorefa_dense.py b/tensorlayer/layers/dense/dorefa_dense.py deleted file mode 100644 index 4bc4f40df..000000000 --- a/tensorlayer/layers/dense/dorefa_dense.py +++ /dev/null @@ -1,113 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer -from tensorlayer.layers.utils import cabs, quantize_active, quantize_weight - -__all__ = [ - 'DorefaDense', -] - - -class DorefaDense(Layer): - """The :class:`DorefaDense` class is a binary fully connected layer, which weights are 'bitW' bits and the output of the previous layer - are 'bitA' bits while inferencing. - - Note that, the bias vector would not be binarized. - - Parameters - ---------- - bitW : int - The bits of this layer's parameter - bitA : int - The bits of the output of previous layer - n_units : int - The number of units of this layer. - act : activation function - The activation function of this layer, usually set to ``tf.act.sign`` or apply :class:`Sign` after :class:`BatchNorm`. - use_gemm : boolean - If True, use gemm instead of ``tf.matmul`` for inferencing. (TODO). - W_init : initializer - The initializer for the weight matrix. - b_init : initializer or None - The initializer for the bias vector. If None, skip biases. - in_channels: int - The number of channels of the previous layer. - If None, it will be automatically detected when the layer is forwarded for the first time. - name : a str - A unique layer name. - - """ - - def __init__( - self, - bitW=1, - bitA=3, - n_units=100, - act=None, - use_gemm=False, - W_init=tl.initializers.truncated_normal(stddev=0.05), - b_init=tl.initializers.constant(value=0.0), - in_channels=None, - name=None, #'dorefa_dense', - ): - super().__init__(name, act=act) - self.bitW = bitW - self.bitA = bitA - self.n_units = n_units - self.use_gemm = use_gemm - self.W_init = W_init - self.b_init = b_init - self.in_channels = in_channels - - if self.in_channels is not None: - self.build((None, self.in_channels)) - self._built = True - - logging.info( - "DorefaDense %s: %d %s" % - (self.name, n_units, self.act.__name__ if self.act is not None else 'No Activation') - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ('{classname}(n_units={n_units}, ' + actstr) - s += ', bitW={bitW}, bitA={bitA}' - if self.in_channels is not None: - s += ', in_channels=\'{in_channels}\'' - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - if len(inputs_shape) != 2: - raise Exception("The input dimension must be rank 2, please reshape or flatten it") - - if self.in_channels is None: - self.in_channels = inputs_shape[1] - - if self.use_gemm: - raise Exception("TODO. 
The current version use tf.matmul for inferencing.") - - n_in = inputs_shape[-1] - self.W = self._get_weights("weights", shape=(n_in, self.n_units), init=self.W_init) - if self.b_init is not None: - self.b = self._get_weights("biases", shape=(self.n_units), init=self.b_init) - - def forward(self, inputs): - inputs = quantize_active(cabs(inputs), self.bitA) - W_ = quantize_weight(self.W, self.bitW) - outputs = tf.matmul(inputs, W_) - # self.outputs = xnor_gemm(self.inputs, W) # TODO - if self.b_init is not None: - outputs = tf.nn.bias_add(outputs, self.b, name='bias_add') - # self.outputs = xnor_gemm(self.inputs, W) + b # TODO - if self.act: - outputs = self.act(outputs) - return outputs diff --git a/tensorlayer/layers/dense/dropconnect.py b/tensorlayer/layers/dense/dropconnect.py index 43c3a144a..b28e73af5 100644 --- a/tensorlayer/layers/dense/dropconnect.py +++ b/tensorlayer/layers/dense/dropconnect.py @@ -2,20 +2,16 @@ # -*- coding: utf-8 -*- import numbers - -import tensorflow as tf - import tensorlayer as tl from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer +from tensorlayer.layers.core import Module __all__ = [ 'DropconnectDense', ] -class DropconnectDense(Layer): +class DropconnectDense(Module): """ The :class:`DropconnectDense` class is :class:`Dense` with DropConnect behaviour which randomly removes connections between this layer and the previous @@ -83,7 +79,7 @@ def __init__( logging.info( "DropconnectDense %s: %d %s" % - (self.name, n_units, self.act.__name__ if self.act is not None else 'No Activation') + (self.name, n_units, self.act.__class__.__name__ if self.act is not None else 'No Activation') ) def __repr__(self): @@ -109,11 +105,15 @@ def build(self, inputs_shape): if self.b_init: self.b = self._get_weights("biases", shape=(self.n_units), init=self.b_init) + self.dropout = tl.ops.Dropout(keep=self.keep) + self.matmul = tl.ops.MatMul() + self.bias_add = tl.ops.BiasAdd() + def forward(self, inputs): - W_dropcon = tf.nn.dropout(self.W, 1 - (self.keep)) - outputs = tf.matmul(inputs, W_dropcon) + W_dropcon = self.dropout(self.W) + outputs = self.matmul(inputs, W_dropcon) if self.b_init: - outputs = tf.nn.bias_add(outputs, self.b, name='bias_add') + outputs = self.bias_add(outputs, self.b) if self.act: outputs = self.act(outputs) return outputs diff --git a/tensorlayer/layers/dense/quan_dense.py b/tensorlayer/layers/dense/quan_dense.py index 67ca73074..062858a5f 100644 --- a/tensorlayer/layers/dense/quan_dense.py +++ b/tensorlayer/layers/dense/quan_dense.py @@ -1,12 +1,9 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -import tensorflow as tf - import tensorlayer as tl from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer +from tensorlayer.layers.core import Module from tensorlayer.layers.utils import (quantize_active_overflow, quantize_weight_overflow) __all__ = [ @@ -14,7 +11,7 @@ ] -class QuanDense(Layer): +class QuanDense(Module): """The :class:`QuanDense` class is a quantized fully connected layer with BN, which weights are 'bitW' bits and the output of the previous layer are 'bitA' bits while inferencing. 
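+# Rough intuition for the 'bitW'/'bitA' arguments, as a NumPy sketch under the
+# assumption of a plain uniform quantizer (the real logic lives in
+# tensorlayer.layers.utils.quantize_weight_overflow / quantize_active_overflow
+# and may differ; `uniform_quantize` below is a hypothetical helper):
+# >>> import numpy as np
+# >>> def uniform_quantize(x, bits):
+# ...     scale = float(2 ** bits - 1)          # number of quantization steps
+# ...     return np.round(np.clip(x, 0.0, 1.0) * scale) / scale
+# >>> uniform_quantize(np.array([0.3]), 2)      # 2 bits -> levels {0, 1/3, 2/3, 1}
+# array([0.33333333])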
@@ -97,6 +94,9 @@ def build(self, inputs_shape): self.W = self._get_weights("weights", shape=(n_in, self.n_units), init=self.W_init) if self.b_init is not None: self.b = self._get_weights("biases", shape=int(self.n_units), init=self.b_init) + self.bias_add = tl.ops.BiasAdd() + + self.matmul = tl.ops.MatMul() def forward(self, inputs): @@ -105,10 +105,10 @@ def forward(self, inputs): W_ = quantize_weight_overflow(self.W, self.bitW) # outputs = tf.matmul(inputs, self.W) - outputs = tf.matmul(inputs, W_) # hao dong change to this + outputs = self.matmul(inputs, W_) # hao dong change to this if self.b_init is not None: - outputs = tf.nn.bias_add(outputs, self.b, name='bias_add') + outputs = self.bias_add(outputs, self.b) if self.act: outputs = self.act(outputs) return outputs diff --git a/tensorlayer/layers/dense/quan_dense_bn.py b/tensorlayer/layers/dense/quan_dense_bn.py deleted file mode 100644 index 9270f548d..000000000 --- a/tensorlayer/layers/dense/quan_dense_bn.py +++ /dev/null @@ -1,194 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import tensorflow as tf -# from tensorlayer.layers.core import LayersConfig -from tensorflow.python.training import moving_averages - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer -from tensorlayer.layers.utils import (quantize_active_overflow, quantize_weight_overflow) - -__all__ = [ - 'QuanDenseWithBN', -] - - -class QuanDenseWithBN(Layer): - """The :class:`QuanDenseWithBN` class is a quantized fully connected layer with BN, which weights are 'bitW' bits and the output of the previous layer - are 'bitA' bits while inferencing. - - Parameters - ---------- - n_units : int - The number of units of this layer. - act : activation function - The activation function of this layer. - decay : float - A decay factor for `ExponentialMovingAverage`. - Suggest to use a large value for large dataset. - epsilon : float - Eplison. - is_train : boolean - Is being used for training or inference. - beta_init : initializer or None - The initializer for initializing beta, if None, skip beta. - Usually you should not skip beta unless you know what happened. - gamma_init : initializer or None - The initializer for initializing gamma, if None, skip gamma. - bitW : int - The bits of this layer's parameter - bitA : int - The bits of the output of previous layer - use_gemm : boolean - If True, use gemm instead of ``tf.matmul`` for inferencing. (TODO). - W_init : initializer - The initializer for the the weight matrix. - W_init_args : dictionary - The arguments for the weight matrix initializer. - in_channels: int - The number of channels of the previous layer. - If None, it will be automatically detected when the layer is forwarded for the first time. - name : a str - A unique layer name. 
- - Examples - --------- - >>> import tensorlayer as tl - >>> net = tl.layers.Input([50, 256]) - >>> layer = tl.layers.QuanDenseWithBN(128, act='relu', name='qdbn1')(net) - >>> print(layer) - >>> net = tl.layers.QuanDenseWithBN(256, act='relu', name='qdbn2')(net) - >>> print(net) - """ - - def __init__( - self, - n_units=100, - act=None, - decay=0.9, - epsilon=1e-5, - is_train=False, - bitW=8, - bitA=8, - gamma_init=tl.initializers.truncated_normal(stddev=0.05), - beta_init=tl.initializers.truncated_normal(stddev=0.05), - use_gemm=False, - W_init=tl.initializers.truncated_normal(stddev=0.05), - W_init_args=None, - in_channels=None, - name=None, # 'quan_dense_with_bn', - ): - super(QuanDenseWithBN, self).__init__(act=act, W_init_args=W_init_args, name=name) - self.n_units = n_units - self.decay = decay - self.epsilon = epsilon - self.is_train = is_train - self.bitW = bitW - self.bitA = bitA - self.gamma_init = gamma_init - self.beta_init = beta_init - self.use_gemm = use_gemm - self.W_init = W_init - self.in_channels = in_channels - - if self.in_channels is not None: - self.build((None, self.in_channels)) - self._built = True - - logging.info( - "QuanDenseLayerWithBN %s: %d %s" % - (self.name, n_units, self.act.__name__ if self.act is not None else 'No Activation') - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ('{classname}(n_units={n_units}, ' + actstr) - s += ', bitW={bitW}, bitA={bitA}' - if self.in_channels is not None: - s += ', in_channels=\'{in_channels}\'' - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - if self.in_channels is None and len(inputs_shape) != 2: - raise Exception("The input dimension must be rank 2, please reshape or flatten it") - - if self.in_channels is None: - self.in_channels = inputs_shape[1] - - if self.use_gemm: - raise Exception("TODO. 
The current version use tf.matmul for inferencing.") - - n_in = inputs_shape[-1] - self.W = self._get_weights("weights", shape=(n_in, self.n_units), init=self.W_init) - - para_bn_shape = (self.n_units, ) - if self.gamma_init: - self.scale_para = self._get_weights("gamm_weights", shape=para_bn_shape, init=self.gamma_init) - else: - self.scale_para = None - - if self.beta_init: - self.offset_para = self._get_weights("beta_weights", shape=para_bn_shape, init=self.beta_init) - else: - self.offset_para = None - - self.moving_mean = self._get_weights( - "moving_mean", shape=para_bn_shape, init=tl.initializers.constant(1.0), trainable=False - ) - self.moving_variance = self._get_weights( - "moving_variacne", shape=para_bn_shape, init=tl.initializers.constant(1.0), trainable=False - ) - - def forward(self, inputs): - x = inputs - inputs = quantize_active_overflow(inputs, self.bitA) - mid_out = tf.matmul(x, self.W) - - mean, variance = tf.nn.moments(x=mid_out, axes=list(range(len(mid_out.get_shape()) - 1))) - - update_moving_mean = moving_averages.assign_moving_average( - self.moving_mean, mean, self.decay, zero_debias=False - ) # if zero_debias=True, has bias - - update_moving_variance = moving_averages.assign_moving_average( - self.moving_variance, variance, self.decay, zero_debias=False - ) # if zero_debias=True, has bias - - if self.is_train: - mean, var = self.mean_var_with_update(update_moving_mean, update_moving_variance, mean, variance) - else: - mean, var = self.moving_mean, self.moving_variance - - w_fold = self._w_fold(self.W, self.scale_para, var, self.epsilon) - - W = quantize_weight_overflow(w_fold, self.bitW) - - outputs = tf.matmul(inputs, W) - - if self.beta_init: - bias_fold = self._bias_fold(self.offset_para, self.scale_para, mean, var, self.epsilon) - outputs = tf.nn.bias_add(outputs, bias_fold, name='bias_add') - else: - outputs = outputs - - if self.act: - outputs = self.act(outputs) - else: - outputs = outputs - return outputs - - def mean_var_with_update(self, update_moving_mean, update_moving_variance, mean, variance): - with tf.control_dependencies([update_moving_mean, update_moving_variance]): - return tf.identity(mean), tf.identity(variance) - - def _w_fold(self, w, gama, var, epsilon): - return tf.compat.v1.div(tf.multiply(gama, w), tf.sqrt(var + epsilon)) - - def _bias_fold(self, beta, gama, mean, var, epsilon): - return tf.subtract(beta, tf.compat.v1.div(tf.multiply(gama, mean), tf.sqrt(var + epsilon))) diff --git a/tensorlayer/layers/dense/ternary_dense.py b/tensorlayer/layers/dense/ternary_dense.py deleted file mode 100644 index 49479df7c..000000000 --- a/tensorlayer/layers/dense/ternary_dense.py +++ /dev/null @@ -1,108 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer -from tensorlayer.layers.utils import compute_alpha, ternary_operation - -__all__ = [ - 'TernaryDense', -] - - -class TernaryDense(Layer): - """The :class:`TernaryDense` class is a ternary fully connected layer, which weights are either -1 or 1 or 0 while inference. - - Note that, the bias vector would not be tenaried. - - Parameters - ---------- - n_units : int - The number of units of this layer. - act : activation function - The activation function of this layer, usually set to ``tf.act.sign`` or apply :class:`SignLayer` after :class:`BatchNormLayer`. 
- use_gemm : boolean - If True, use gemm instead of ``tf.matmul`` for inference. (TODO). - W_init : initializer - The initializer for the weight matrix. - b_init : initializer or None - The initializer for the bias vector. If None, skip biases. - in_channels: int - The number of channels of the previous layer. - If None, it will be automatically detected when the layer is forwarded for the first time. - name : None or str - A unique layer name. - - """ - - def __init__( - self, - n_units=100, - act=None, - use_gemm=False, - W_init=tl.initializers.truncated_normal(stddev=0.05), - b_init=tl.initializers.constant(value=0.0), - in_channels=None, - name=None, #'ternary_dense', - ): - super().__init__(name, act=act) - self.n_units = n_units - self.use_gemm = use_gemm - self.W_init = W_init - self.b_init = b_init - self.in_channels = in_channels - - if self.in_channels is not None: - self.build((None, self.in_channels)) - self._built = True - - logging.info( - "TernaryDense %s: %d %s" % - (self.name, n_units, self.act.__name__ if self.act is not None else 'No Activation') - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = ('{classname}(n_units={n_units}, ' + actstr) - if self.in_channels is not None: - s += ', in_channels=\'{in_channels}\'' - if self.name is not None: - s += ', name=\'{name}\'' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def build(self, inputs_shape): - if len(inputs_shape) != 2: - raise Exception("The input dimension must be rank 2, please reshape or flatten it") - - if self.in_channels is None: - self.in_channels = inputs_shape[1] - - if self.use_gemm: - raise Exception("TODO. The current version use tf.matmul for inferencing.") - - n_in = inputs_shape[-1] - - self.W = self._get_weights(var_name="weights", shape=(n_in, self.n_units), init=self.W_init) - if self.b_init is not None: - self.b = self._get_weights(var_name="biases", shape=(self.n_units), init=self.b_init) - - def forward(self, inputs): - # W = tl.act.sign(W) # dont update ... - alpha = compute_alpha(self.W) - W_ = ternary_operation(self.W) - W_ = tf.multiply(alpha, W_) - # W = tf.Variable(W) - - outputs = tf.matmul(inputs, W_) - # self.outputs = xnor_gemm(self.inputs, W) # TODO - - if self.b_init is not None: - outputs = tf.nn.bias_add(outputs, self.b, name='bias_add') - if self.act: - outputs = self.act(outputs) - return outputs diff --git a/tensorlayer/layers/dropout.py b/tensorlayer/layers/dropout.py index 3724d8b43..61cc881d4 100644 --- a/tensorlayer/layers/dropout.py +++ b/tensorlayer/layers/dropout.py @@ -1,20 +1,16 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -import tensorflow as tf - +import tensorlayer as tl from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer - -# from tensorlayer.layers.core import LayersConfig +from tensorlayer.layers.core import Module __all__ = [ 'Dropout', ] -class Dropout(Layer): +class Dropout(Module): """ The :class:`Dropout` class is a noise layer which randomly set some activations to zero according to a keeping probability. 
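+# Keep-probability sketch: `keep` is the probability that an activation
+# survives, so tl.ops.Dropout(keep=0.8) zeroes roughly 20% of the activations.
+# Under the TensorFlow backend this corresponds to the removed call
+# tf.nn.dropout(inputs, rate=1 - keep) shown in the old forward() below.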
@@ -31,7 +27,7 @@ class Dropout(Layer): """ - def __init__(self, keep, seed=None, name=None): #"dropout"): + def __init__(self, keep, seed=0, name=None): #"dropout"): super(Dropout, self).__init__(name) self.keep = keep self.seed = seed @@ -49,12 +45,22 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape=None): - pass + self.dropout = tl.ops.Dropout(keep=self.keep, seed=self.seed) # @tf.function def forward(self, inputs): if self.is_train: - outputs = tf.nn.dropout(inputs, rate=1 - (self.keep), seed=self.seed, name=self.name) + outputs = self.dropout(inputs) else: outputs = inputs return outputs + + +if __name__ == '__main__': + shapes_do = (20, 16, 50) + from tensorlayer.layers.inputs import Input + # from mindspore import context + # context.set_context(mode=context.GRAPH_MODE) + inputs_do = Input(shapes_do) + dropout = Dropout(keep=0.1)(inputs_do) + print(dropout) diff --git a/tensorlayer/layers/embedding.py b/tensorlayer/layers/embedding.py index 9d0d882d1..e8d1e3748 100644 --- a/tensorlayer/layers/embedding.py +++ b/tensorlayer/layers/embedding.py @@ -1,24 +1,15 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -import numpy as np -import tensorflow as tf - import tensorlayer as tl from tensorlayer import logging -from tensorlayer.layers.core import Layer - +from tensorlayer.layers.core import Module # from tensorlayer.layers.core import LayersConfig -__all__ = [ - 'OneHot', - 'Word2vecEmbedding', - 'Embedding', - 'AverageEmbedding', -] +__all__ = ['OneHot', 'Word2vecEmbedding', 'Embedding', 'AverageEmbedding'] -class OneHot(Layer): +class OneHot(Module): """ The :class:`OneHot` class is the starting layer of a neural network, see ``tf.one_hot``. Useful link: `https://www.tensorflow.org/api_docs/python/tf/one_hot`. @@ -42,7 +33,7 @@ class OneHot(Layer): --------- >>> import tensorflow as tf >>> import tensorlayer as tl - >>> net = tl.layers.Input([32], dtype=tf.int32) + >>> net = tl.layers.Input([32], dtype=tl.int32) >>> onehot = tl.layers.OneHot(depth=8) >>> print(onehot) OneHot(depth=8, name='onehot') @@ -52,8 +43,7 @@ class OneHot(Layer): """ - def __init__(self, depth=None, on_value=None, off_value=None, axis=None, dtype=None, name=None): #'input'): - + def __init__(self, depth=None, on_value=1.0, off_value=0.0, axis=-1, dtype=tl.float32, name=None): super(OneHot, self).__init__(name) self.depth = depth self.on_value = on_value @@ -62,9 +52,8 @@ def __init__(self, depth=None, on_value=None, off_value=None, axis=None, dtype=N self.dtype = dtype logging.info("OneHotInput %s" % (self.name)) - if not self._built: - self.build(tuple()) - self._built = True + self.build() + self._built = True if self.depth is None: raise RuntimeError(self.__class__.__name__ + ": depth == None the number of output units is undefined") @@ -82,10 +71,11 @@ def __repr__(self): s += ')' return s.format(classname=self.__class__.__name__, **self.__dict__) - def build(self, inputs_shape): - pass + def build(self, inputs_shape=None): + self.onehot = tl.ops.OneHot( + depth=self.depth, on_value=self.on_value, off_value=self.off_value, axis=self.axis, dtype=self.dtype + ) - # @tf.function def forward(self, inputs): """ Parameters @@ -93,13 +83,11 @@ def forward(self, inputs): inputs : input tensor The inputs are indices. The locations represented by indices in indices take value on_value, while all other locations take value off_value. 
""" - outputs = tf.one_hot( - inputs, self.depth, on_value=self.on_value, off_value=self.off_value, axis=self.axis, dtype=self.dtype - ) + outputs = self.onehot(inputs) return outputs -class Word2vecEmbedding(Layer): +class Word2vecEmbedding(Module): """ The :class:`Word2vecEmbedding` class is a fully connected layer. For Word Embedding, words are input as integer index. @@ -128,7 +116,7 @@ class Word2vecEmbedding(Layer): In a static model, once the model is constructed, the computation of nce loss cannot be changed (always computed or not computed). nce_loss_args : dictionary - The arguments for tf.nn.nce_loss() + The arguments for tf.ops.nce_loss() E_init : initializer The initializer for initializing the embedding matrix nce_W_init : initializer @@ -248,7 +236,7 @@ def build(self, inputs_shape): init=self.E_init, ) - self.normalized_embeddings = tf.nn.l2_normalize(self.embeddings, 1) + self.normalized_embeddings = tl.L2Normalize(axis=1)(self.embeddings) if self.activate_nce_loss: # Construct the variables for the NCE loss (i.e. negative sampling) @@ -264,7 +252,9 @@ def build(self, inputs_shape): init=self.nce_b_init, ) - # @tf.function + self.embedding_lookup = tl.EmbeddingLookup() + self.nce_loss = tl.NCELoss(**self.nce_loss_args) + def forward(self, inputs, use_nce_loss=None): """ Parameters @@ -284,8 +274,10 @@ def forward(self, inputs, use_nce_loss=None): The nce_cost is returned only if the nce_loss is used. """ - ids = inputs[0] if isinstance(inputs, list) else inputs - outputs = tf.nn.embedding_lookup(params=self.embeddings, ids=ids) + if isinstance(inputs, list): + outputs = self.embedding_lookup(params=self.embeddings, ids=inputs[0]) + else: + outputs = self.embedding_lookup(params=self.embeddings, ids=inputs) if use_nce_loss is True and not self.activate_nce_loss: raise AttributeError( @@ -297,10 +289,10 @@ def forward(self, inputs, use_nce_loss=None): if not isinstance(inputs, list): raise ValueError("If nce loss is used, the labels of inputs must be provided.") - nce_cost = tf.reduce_mean( - input_tensor=tf.nn.nce_loss( + nce_cost = tl.reduce_mean( + input_tensor=self.nce_loss( weights=self.nce_weights, biases=self.nce_biases, inputs=outputs, labels=inputs[1], - num_sampled=self.num_sampled, num_classes=self.vocabulary_size, **self.nce_loss_args + num_sampled=self.num_sampled, num_classes=self.vocabulary_size ) ) @@ -309,7 +301,7 @@ def forward(self, inputs, use_nce_loss=None): return outputs -class Embedding(Layer): +class Embedding(Module): """ The :class:`Embedding` class is a look-up table for word embedding. @@ -387,8 +379,8 @@ def build(self, inputs_shape): shape=(self.vocabulary_size, self.embedding_size), init=self.E_init, ) + self.embedding_lookup = tl.EmbeddingLookup() - # @tf.function def forward(self, inputs): """ Parameters @@ -396,11 +388,11 @@ def forward(self, inputs): inputs : Tensor The input of a network. """ - outputs = tf.nn.embedding_lookup(params=self.embeddings, ids=inputs) + outputs = self.embedding_lookup(params=self.embeddings, ids=inputs) return outputs -class AverageEmbedding(Layer): +class AverageEmbedding(Module): """The :class:`AverageEmbedding` averages over embeddings of inputs. This is often used as the input layer for models like DAN[1] and FastText[2]. 
@@ -487,8 +479,13 @@ def build(self, inputs_shape): shape=(self.vocabulary_size, self.embedding_size), init=self.E_init, ) + self.embedding_lookup = tl.EmbeddingLookup() + self.not_equal = tl.Not_equal() + self.cast = tl.Cast(tl.float32) + self.expand_dims = tl.ExpandDims(axis=-1) + self.reduce_sum = tl.ReduceSum(axis=1) + self.count_nonzero = tl.Count_nonzero(keepdims=True, dtype=tl.float32) - # @tf.function def forward(self, inputs): """ Parameters @@ -497,25 +494,15 @@ def forward(self, inputs): The network input. For word inputs, please use integer index format, 2D tensor: (batch_size, sentence_length). """ - word_embeddings = tf.nn.embedding_lookup( - params=self.embeddings, - ids=inputs, - name='word_embeddings', - ) + word_embeddings = self.embedding_lookup(params=self.embeddings, ids=inputs) # Zero out embeddings of pad value - masks = tf.not_equal(inputs, self.pad_value, name='masks') - word_embeddings *= tf.cast(tf.expand_dims(masks, axis=-1), dtype=tf.float32) - sum_word_embeddings = tf.reduce_sum(input_tensor=word_embeddings, axis=1) + masks = self.not_equal(inputs, self.pad_value) + word_embeddings *= self.cast(self.expand_dims(masks)) + sum_word_embeddings = self.reduce_sum(input=word_embeddings) # Count number of non-padding words in each sentence - sentence_lengths = tf.math.count_nonzero( - masks, - axis=1, - keepdims=True, - dtype=tf.float32, - name='sentence_lengths', - ) + sentence_lengths = self.count_nonzero(masks, axis=1) sentence_embeddings = tf.divide( sum_word_embeddings, @@ -526,3 +513,16 @@ def forward(self, inputs): outputs = sentence_embeddings return outputs + + +if __name__ == '__main__': + import tensorflow as tf + import tensorlayer as tl + batch_size = 8 + length = 5 + input = tl.layers.Input([batch_size, length], dtype=tl.int32) + avgembed = AverageEmbedding(vocabulary_size=1000, embedding_size=50, name='avg') + print(avgembed) + AverageEmbedding(vocabulary_size=1000, embedding_size=50, pad_value=0) + tensor = avgembed(input) + print(tensor) diff --git a/tensorlayer/layers/extend.py b/tensorlayer/layers/extend.py index c34815e97..9f765c518 100644 --- a/tensorlayer/layers/extend.py +++ b/tensorlayer/layers/extend.py @@ -1,11 +1,10 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -import tensorflow as tf +import tensorlayer as tl from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer +from tensorlayer.layers.core import Module __all__ = [ 'ExpandDims', @@ -13,7 +12,7 @@ ] -class ExpandDims(Layer): +class ExpandDims(Module): """ The :class:`ExpandDims` class inserts a dimension of 1 into a tensor's shape, see `tf.expand_dims() `__ . @@ -53,15 +52,15 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape): - pass + self.expand_dims = tl.ops.ExpandDims(axis=self.axis) # @tf.function def forward(self, inputs): - outputs = tf.expand_dims(inputs, axis=self.axis, name=self.name) + outputs = self.expand_dims(inputs) return outputs -class Tile(Layer): +class Tile(Module): """ The :class:`Tile` class constructs a tensor by tiling a given tensor, see `tf.tile() `__ . 
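+# Shape sketch: tiling repeats each dimension `multiples[i]` times, e.g. a
+# [10, 3] input with multiples=[2, 3] yields a [20, 9] output.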
@@ -78,7 +77,6 @@ class Tile(Layer): -------- >>> x = tl.layers.Input([10, 3], name='in') >>> y = tl.layers.Tile(multiples=[2, 3])(x) - [20, 9] """ def __init__(self, multiples=None, name=None): #'tile'): @@ -99,9 +97,9 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape): - pass + self.tile = tl.ops.Tile() # @tf.function def forward(self, inputs): - outputs = tf.tile(inputs, multiples=self.multiples, name=self.name) + outputs = self.tile(inputs, multiples=self.multiples) return outputs diff --git a/tensorlayer/layers/image_resampling.py b/tensorlayer/layers/image_resampling.py index b327901a7..a7bdbf835 100644 --- a/tensorlayer/layers/image_resampling.py +++ b/tensorlayer/layers/image_resampling.py @@ -1,11 +1,9 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -import tensorflow as tf - +import tensorlayer as tl from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer +from tensorlayer.layers.core import Module __all__ = [ 'UpSampling2d', @@ -13,7 +11,7 @@ ] -class UpSampling2d(Layer): +class UpSampling2d(Module): """The :class:`UpSampling2d` class is a up-sampling 2D layer. See `tf.image.resize_images `__. @@ -45,30 +43,23 @@ class UpSampling2d(Layer): """ - def __init__( - self, - scale, - method='bilinear', - antialias=False, - data_format='channel_last', - name=None, - ): + def __init__(self, scale, method='bilinear', antialias=False, data_format='channel_last', name=None, ksize=None): super(UpSampling2d, self).__init__(name) self.method = method self.antialias = antialias self.data_format = data_format + self.ksize = ksize logging.info( "UpSampling2d %s: scale: %s method: %s antialias: %s" % (self.name, scale, self.method, self.antialias) ) - self.build(None) - self._built = True - if isinstance(scale, (list, tuple)) and len(scale) != 2: raise ValueError("scale must be int or tuple/list of length 2") self.scale = (scale, scale) if isinstance(scale, int) else scale + self.build(None) + self._built = True def __repr__(self): s = '{classname}(scale={scale}, method={method}' @@ -78,8 +69,10 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, scale=self.scale, method=self.method, name=self.name) def build(self, inputs_shape): - if self.data_format != 'channel_last': - raise Exception("UpSampling2d tf.image.resize_images only support channel_last") + self.resize = tl.ops.Resize( + scale=self.scale, method=self.method, antialias=self.antialias, data_format=self.data_format, + ksize=self.ksize + ) def forward(self, inputs): """ @@ -89,12 +82,17 @@ def forward(self, inputs): inputs : :class:`Tensor` Inputs tensors with 4-D Tensor of the shape (batch, height, width, channels) """ - output_size = [int(inputs.shape[1] * self.scale[0]), int(inputs.shape[2] * self.scale[1])] - outputs = tf.image.resize(inputs, size=output_size, method=self.method, antialias=self.antialias) + outputs = self.resize(inputs) return outputs -class DownSampling2d(Layer): +if __name__ == '__main__': + ni = tl.layers.Input([10, 32, 50, 50], name='input') + y = UpSampling2d(scale=(2, 2), data_format='channels_first', ksize=(50, 50))(ni) + print(y) + + +class DownSampling2d(Module): """The :class:`DownSampling2d` class is down-sampling 2D layer. See `tf.image.resize_images `__. 
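+# Scale sketch: UpSampling2d multiplies the spatial size by `scale`, while
+# DownSampling2d divides by it, e.g. with scale=(2, 2) a (batch, 32, 32, C)
+# input becomes (batch, 64, 64, C) after up-sampling and (batch, 16, 16, C)
+# after down-sampling (see output_size in forward() below).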
@@ -171,5 +169,5 @@ def forward(self, inputs): Inputs tensors with 4-D Tensor of the shape (batch, height, width, channels) """ output_size = [int(inputs.shape[1] * 1.0 / self.scale[0]), int(inputs.shape[2] * 1.0 / self.scale[1])] - outputs = tf.image.resize(inputs, size=output_size, method=self.method, antialias=self.antialias) + outputs = tl.ops.resize(inputs, output_size=output_size, method=self.method, antialias=self.antialias) return outputs diff --git a/tensorlayer/layers/inputs.py b/tensorlayer/layers/inputs.py index 9d537a33d..80a7d0c62 100644 --- a/tensorlayer/layers/inputs.py +++ b/tensorlayer/layers/inputs.py @@ -1,19 +1,14 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -import numpy as np -import tensorflow as tf - import tensorlayer as tl from tensorlayer import logging -from tensorlayer.layers.core import Layer, LayerNode - -# from tensorlayer.layers.core import LayersConfig +from tensorlayer.layers.core import Module, LayerNode __all__ = ['Input', '_InputLayer'] -class _InputLayer(Layer): +class _InputLayer(Module): """ The :class:`Input` class is the starting layer of a neural network. @@ -28,28 +23,24 @@ class _InputLayer(Layer): """ - def __init__(self, shape, dtype=tf.float32, name=None): #'input'): - # super(InputLayer, self).__init__(prev_layer=inputs, name=name) + def __init__(self, shape, dtype=tl.float32, name=None): super(_InputLayer, self).__init__(name) - if isinstance(dtype, str): - try: - dtype = eval(dtype) - except Exception as e: - raise RuntimeError("%s is not a valid dtype for InputLayer." % (dtype)) - if not isinstance(dtype, tf.DType): - raise RuntimeError("%s is not a valid dtype for InputLayer." % (dtype)) + # if isinstance(dtype, str): + # try: + # dtype = eval(dtype) + # except Exception as e: + # raise RuntimeError("%s is not a valid dtype for InputLayer." % (dtype)) + # if not isinstance(dtype, tl.DType): + # raise RuntimeError("%s is not a valid dtype for InputLayer." 
% (dtype)) logging.info("Input %s: %s" % (self.name, str(shape))) self.shape = shape # shape is needed in __repr__ - - shape_without_none = [_ if _ is not None else 1 for _ in shape] - # self.outputs = self.forward(tl.initializers.random_normal()(shape_without_none)) - outputs = self.forward(tl.initializers.ones()(shape_without_none, dtype=dtype)) - - self._built = True - - self._add_node(outputs, outputs) + self.dtype = dtype + self.shape_without_none = [_ if _ is not None else 1 for _ in shape] + self.outputs = tl.initializers.ones()(self.shape_without_none, dtype=self.dtype) + # self._built = True + # self._add_node(outputs, outputs) def __repr__(self): s = 'Input(shape=%s' % str(self.shape) @@ -58,17 +49,28 @@ def __repr__(self): s += ')' return s - def __call__(self, inputs, *args, **kwargs): - return super(_InputLayer, self).__call__(inputs) + def __call__(self, *args, **kwargs): + # return super(_InputLayer, self).__call__(inputs) + return self.outputs def build(self, inputs_shape): pass - def forward(self, inputs): - return inputs + def forward(self): + # tl.initializers.random_uniform() + # tl.initializers.random_normal() + # tl.initializers.truncated_normal() + # tl.initializers.constant(2.0) + # tl.initializers.He_Normal() + # tl.initializers.He_Normal() + # tl.initializers.zeros() + # tl.initializers.ones() + + # outputs = self.inputs(self.shape_without_none, dtype=self.dtype) + return self.outputs -def Input(shape, dtype=tf.float32, name=None): +def Input(shape, init=tl.initializers.ones(), dtype=tl.float32, name=None): """ The :class:`Input` class is the starting layer of a neural network. @@ -81,5 +83,6 @@ def Input(shape, dtype=tf.float32, name=None): """ input_layer = _InputLayer(shape, dtype=dtype, name=name) - outputs = input_layer._nodes[0].out_tensors[0] + outputs = input_layer(init) + # outputs = input_layer._nodes[0].out_tensors[0] return outputs diff --git a/tensorlayer/layers/lambda_layers.py b/tensorlayer/layers/lambda_layers.py deleted file mode 100644 index c650f233c..000000000 --- a/tensorlayer/layers/lambda_layers.py +++ /dev/null @@ -1,283 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import tensorflow as tf - -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.files import utils -from tensorlayer.layers.core import Layer - -# from tensorlayer.layers.core import TF_GRAPHKEYS_VARIABLES - -__all__ = [ - 'Lambda', - 'ElementwiseLambda', -] - - -class Lambda(Layer): - """A layer that takes a user-defined function using Lambda. - If the function has trainable weights, the weights should be provided. - Remember to make sure the weights provided when the layer is constructed are SAME as - the weights used when the layer is forwarded. - For multiple inputs see :class:`ElementwiseLambda`. - - Parameters - ---------- - fn : function - The function that applies to the inputs (e.g. tensor from the previous layer). - fn_weights : list - The trainable weights for the function if any. Optional. - fn_args : dict - The arguments for the function if any. Optional. - name : str or None - A unique layer name. - - Examples - --------- - Non-parametric and non-args case: - This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional). 
- - >>> x = tl.layers.Input([8, 3], name='input') - >>> y = tl.layers.Lambda(lambda x: 2*x, name='lambda')(x) - - - Non-parametric and with args case: - This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional). - - >>> def customize_func(x, foo=42): # x is the inputs, foo is an argument - >>> return foo * x - >>> x = tl.layers.Input([8, 3], name='input') - >>> lambdalayer = tl.layers.Lambda(customize_func, fn_args={'foo': 2}, name='lambda')(x) - - - Any function with outside variables: - This case has not been supported in Model.save() / Model.load() yet. - Please avoid using Model.save() / Model.load() to save / load models that contain such Lambda layer. Instead, you may use Model.save_weights() / Model.load_weights() to save / load model weights. - Note: In this case, fn_weights should be a list, and then the trainable weights in this Lambda layer can be added into the weights of the whole model. - - >>> a = tf.Variable(1.0) - >>> def func(x): - >>> return x + a - >>> x = tl.layers.Input([8, 3], name='input') - >>> y = tl.layers.Lambda(func, fn_weights=[a], name='lambda')(x) - - - Parametric case, merge other wrappers into TensorLayer: - This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional). - - >>> layers = [ - >>> tf.keras.layers.Dense(10, activation=tf.nn.relu), - >>> tf.keras.layers.Dense(5, activation=tf.nn.sigmoid), - >>> tf.keras.layers.Dense(1, activation=tf.identity) - >>> ] - >>> perceptron = tf.keras.Sequential(layers) - >>> # in order to compile keras model and get trainable_variables of the keras model - >>> _ = perceptron(np.random.random([100, 5]).astype(np.float32)) - >>> - >>> class CustomizeModel(tl.models.Model): - >>> def __init__(self): - >>> super(CustomizeModel, self).__init__() - >>> self.dense = tl.layers.Dense(in_channels=1, n_units=5) - >>> self.lambdalayer = tl.layers.Lambda(perceptron, perceptron.trainable_variables) - >>> - >>> def forward(self, x): - >>> z = self.dense(x) - >>> z = self.lambdalayer(z) - >>> return z - >>> - >>> optimizer = tf.optimizers.Adam(learning_rate=0.1) - >>> model = CustomizeModel() - >>> model.train() - >>> - >>> for epoch in range(50): - >>> with tf.GradientTape() as tape: - >>> pred_y = model(data_x) - >>> loss = tl.cost.mean_squared_error(pred_y, data_y) - >>> - >>> gradients = tape.gradient(loss, model.trainable_weights) - >>> optimizer.apply_gradients(zip(gradients, model.trainable_weights)) - - """ - - def __init__( - self, - fn, - fn_weights=None, - fn_args=None, - name=None, - ): - - super(Lambda, self).__init__(name=name) - self.fn = fn - self._trainable_weights = fn_weights if fn_weights is not None else [] - self.fn_args = fn_args if fn_args is not None else {} - - try: - fn_name = repr(self.fn) - except: - fn_name = 'name not available' - logging.info("Lambda %s: func: %s, len_weights: %s" % (self.name, fn_name, len(self._trainable_weights))) - - self.build() - self._built = True - - def __repr__(self): - s = '{classname}(' - s += 'fn={fn_name},' - s += 'len_weights={len_weights},' - s += 'name=\'{name}\'' - s += ')' - try: - fn_name = repr(self.fn) - except: - fn_name = 'name not available' - return s.format( - classname=self.__class__.__name__, fn_name=fn_name, len_weights=len(self._trainable_weights), - **self.__dict__ - ) - - def build(self, inputs_shape=None): - pass - - def forward(self, inputs, **kwargs): - - if len(kwargs) == 0: - outputs = self.fn(inputs, **self.fn_args) - 
else: - outputs = self.fn(inputs, **kwargs) - - return outputs - - def get_args(self): - init_args = {} - if isinstance(self.fn, tf.keras.layers.Layer) or isinstance(self.fn, tf.keras.Model): - init_args.update({"layer_type": "keraslayer"}) - init_args["fn"] = utils.save_keras_model(self.fn) - init_args["fn_weights"] = None - if len(self._nodes) == 0: - init_args["keras_input_shape"] = [] - else: - init_args["keras_input_shape"] = self._nodes[0].in_tensors[0].get_shape().as_list() - else: - init_args = {"layer_type": "normal"} - return init_args - - -class ElementwiseLambda(Layer): - """A layer that use a custom function to combine multiple :class:`Layer` inputs. - If the function has trainable weights, the weights should be provided. - Remember to make sure the weights provided when the layer is constructed are SAME as - the weights used when the layer is forwarded. - - Parameters - ---------- - fn : function - The function that applies to the inputs (e.g. tensor from the previous layer). - fn_weights : list - The trainable weights for the function if any. Optional. - fn_args : dict - The arguments for the function if any. Optional. - name : str or None - A unique layer name. - - Examples - -------- - - Non-parametric and with args case - This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional). - - >>> # z = mean + noise * tf.exp(std * 0.5) + foo - >>> def func(noise, mean, std, foo=42): - >>> return mean + noise * tf.exp(std * 0.5) + foo - >>> noise = tl.layers.Input([100, 1]) - >>> mean = tl.layers.Input([100, 1]) - >>> std = tl.layers.Input([100, 1]) - >>> out = tl.layers.ElementwiseLambda(fn=func, fn_args={'foo': 84}, name='elementwiselambda')([noise, mean, std]) - - - Non-parametric and non-args case - This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional). - - >>> # z = mean + noise * tf.exp(std * 0.5) - >>> noise = tl.layers.Input([100, 1]) - >>> mean = tl.layers.Input([100, 1]) - >>> std = tl.layers.Input([100, 1]) - >>> out = tl.layers.ElementwiseLambda(fn=lambda x, y, z: x + y * tf.exp(z * 0.5), name='elementwiselambda')([noise, mean, std]) - - - Any function with outside variables - This case has not been supported in Model.save() / Model.load() yet. - Please avoid using Model.save() / Model.load() to save / load models that contain such ElementwiseLambda layer. Instead, you may use Model.save_weights() / Model.load_weights() to save / load model weights. - Note: In this case, fn_weights should be a list, and then the trainable weights in this ElementwiseLambda layer can be added into the weights of the whole model. 
- - >>> # z = mean + noise * tf.exp(std * 0.5) + vara - >>> vara = [tf.Variable(1.0)] - >>> def func(noise, mean, std): - >>> return mean + noise * tf.exp(std * 0.5) + vara - >>> noise = tl.layers.Input([100, 1]) - >>> mean = tl.layers.Input([100, 1]) - >>> std = tl.layers.Input([100, 1]) - >>> out = tl.layers.ElementwiseLambda(fn=func, fn_weights=vara, name='elementwiselambda')([noise, mean, std]) - - """ - - def __init__( - self, - fn, - fn_weights=None, - fn_args=None, - name=None, #'elementwiselambda', - ): - - super(ElementwiseLambda, self).__init__(name=name) - self.fn = fn - self._trainable_weights = fn_weights if fn_weights is not None else [] - self.fn_args = fn_args if fn_args is not None else {} - - try: - fn_name = repr(self.fn) - except: - fn_name = 'name not available' - logging.info( - "ElementwiseLambda %s: func: %s, len_weights: %s" % (self.name, fn_name, len(self._trainable_weights)) - ) - - self.build() - self._built = True - - def __repr__(self): - s = '{classname}(' - s += 'fn={fn_name},' - s += 'len_weights={len_weights},' - s += 'name=\'{name}\'' - s += ')' - try: - fn_name = repr(self.fn) - except: - fn_name = 'name not available' - return s.format( - classname=self.__class__.__name__, fn_name=fn_name, len_weights=len(self._trainable_weights), - **self.__dict__ - ) - - def build(self, inputs_shape=None): - # do nothing - # the weights of the function are provided when the Lambda layer is constructed - pass - - # @tf.function - def forward(self, inputs, **kwargs): - - if not isinstance(inputs, list): - raise TypeError( - "The inputs should be a list of values which corresponds with the customised lambda function." - ) - - if len(kwargs) == 0: - outputs = self.fn(*inputs, **self.fn_args) - else: - outputs = self.fn(*inputs, **kwargs) - - return outputs diff --git a/tensorlayer/layers/merge.py b/tensorlayer/layers/merge.py index 3191d9db1..ff26621ae 100644 --- a/tensorlayer/layers/merge.py +++ b/tensorlayer/layers/merge.py @@ -1,10 +1,10 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -import tensorflow as tf +import tensorlayer as tl from tensorlayer import logging -from tensorlayer.layers.core import Layer +from tensorlayer.layers.core import Module __all__ = [ 'Concat', @@ -12,7 +12,7 @@ ] -class Concat(Layer): +class Concat(Module): """A layer that concats multiple tensors according to given axis. Parameters @@ -24,7 +24,7 @@ class Concat(Layer): Examples ---------- - >>> class CustomModel(tl.models.Model): + >>> class CustomModel(Module): >>> def __init__(self): >>> super(CustomModel, self).__init__(name="custom") >>> self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu1_1') @@ -58,7 +58,7 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape): - pass + self.concat = tl.ops.Concat(self.concat_dim) # @tf.function def forward(self, inputs): @@ -67,12 +67,11 @@ def forward(self, inputs): prev_layer : list of :class:`Layer` List of layers to concatenate. """ - outputs = tf.concat(inputs, self.concat_dim, name=self.name) - + outputs = self.concat(inputs) return outputs -class Elementwise(Layer): +class Elementwise(Module): """A layer that combines multiple :class:`Layer` that have the same output shapes according to an element-wise operation. If the element-wise operation is complicated, please consider to use :class:`ElementwiseLambda`. 
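Both merge layers follow the same pattern as Tile above: build() instantiates a backend op
(here tl.ops.Concat) that forward() reuses. A minimal sketch of the rewritten Concat in use
(the Elementwise changes continue below), assuming the TensorFlow backend and the Dense/Input
APIs used elsewhere in this patch:

    import tensorlayer as tl
    from tensorlayer.layers import Input, Dense, Concat

    # Two 10-unit branches over the same 20-feature input, joined on the last axis.
    ni = Input([4, 20], name='input')
    d1 = Dense(in_channels=20, n_units=10, act=tl.ReLU, name='branch1')(ni)
    d2 = Dense(in_channels=20, n_units=10, act=tl.ReLU, name='branch2')(ni)
    out = Concat(concat_dim=-1, name='concat')([d1, d2])
    print(out.shape)  # expected: (4, 20)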
@@ -106,7 +105,7 @@ class Elementwise(Layer): def __init__( self, - combine_fn=tf.minimum, + combine_fn=tl.ops.minimum, act=None, name=None, #'elementwise', ): @@ -119,7 +118,7 @@ def __init__( logging.info( "Elementwise %s: fn: %s act: %s" % - (self.name, combine_fn.__name__, ('No Activation' if self.act is None else self.act.__name__)) + (self.name, combine_fn.__name__, ('No Activation' if self.act is None else self.act.__class__.__name__)) ) def __repr__(self): @@ -137,7 +136,26 @@ def build(self, inputs_shape): def forward(self, inputs): outputs = inputs[0] for input in inputs[1:]: - outputs = self.combine_fn(outputs, input, name=self.name) + outputs = self.combine_fn(outputs, input) if self.act: outputs = self.act(outputs) return outputs + + +# if __name__ == '__main__': +# from tensorlayer.layers import Dense, Input +# class CustomModel(Module): +# def __init__(self): +# super(CustomModel, self).__init__(name="custom") +# self.dense1 = Dense(in_channels=20, n_units=50, act=tl.ReLU, name='relu1_1') +# self.dense2 = Dense(in_channels=20, n_units=50, act=tl.ReLU, name='relu2_1') +# self.concat = Elementwise(combine_fn=tl.ops.minimum, name='minimum', act=tl.ReLU) +# +# def forward(self, inputs): +# d1 = self.dense1(inputs) +# d2 = self.dense2(inputs) +# outputs = self.concat([d1, d2]) +# return outputs +# input = Input(shape=[20, 20]) +# net = CustomModel() +# print(net(input)) diff --git a/tensorlayer/layers/noise.py b/tensorlayer/layers/noise.py index 1a6e85463..d1a164992 100644 --- a/tensorlayer/layers/noise.py +++ b/tensorlayer/layers/noise.py @@ -1,19 +1,16 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -import tensorflow as tf - import tensorlayer as tl from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer +from tensorlayer.layers.core import Module __all__ = [ 'GaussianNoise', ] -class GaussianNoise(Layer): +class GaussianNoise(Module): """ The :class:`GaussianNoise` class is noise layer that adding noise with gaussian distribution to the activation. @@ -36,7 +33,7 @@ class GaussianNoise(Layer): With TensorLayer >>> net = tl.layers.Input([64, 200], name='input') - >>> net = tl.layers.Dense(n_units=100, act=tf.nn.relu, name='dense')(net) + >>> net = tl.layers.Dense(in_channels=200, n_units=100, act=tl.ReLU, name='dense')(net) >>> gaussianlayer = tl.layers.GaussianNoise(name='gaussian')(net) >>> print(gaussianlayer) >>> output shape : (64, 100) @@ -76,7 +73,15 @@ def forward(self, inputs): if (self.is_train or self.is_always) is False: return inputs else: - # noise = np.random.normal(0.0 , sigma , tf.to_int64(self.inputs).get_shape()) - noise = tf.random.normal(shape=inputs.get_shape(), mean=self.mean, stddev=self.stddev, seed=self.seed) + shapes = tl.get_tensor_shape(inputs) + noise = tl.ops.random_normal(shape=shapes, mean=self.mean, stddev=self.stddev, seed=self.seed) outputs = inputs + noise return outputs + + +# if __name__ == '__main__': +# from tensorlayer.layers import Dense, Input +# net = Input([64, 200], name='input') +# net = Dense(in_channels=200, n_units=100, act=tl.ReLU, name='dense')(net) +# gaussianlayer = GaussianNoise(name='gaussian')(net) +# print(gaussianlayer) diff --git a/tensorlayer/layers/normalization.py b/tensorlayer/layers/normalization.py index 161d6e018..113a83d67 100644 --- a/tensorlayer/layers/normalization.py +++ b/tensorlayer/layers/normalization.py @@ -1,152 +1,19 @@ #! 
/usr/bin/python # -*- coding: utf-8 -*- -import tensorflow as tf -from tensorflow.python.framework import ops -from tensorflow.python.ops import math_ops -from tensorflow.python.training import moving_averages - import tensorlayer as tl from tensorlayer import logging -from tensorlayer.layers.core import Layer +from tensorlayer.layers.core import Module __all__ = [ - 'LocalResponseNorm', - 'BatchNorm', # FIXME: wthether to keep BatchNorm + 'BatchNorm', 'BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d', - 'InstanceNorm', - 'InstanceNorm1d', - 'InstanceNorm2d', - 'InstanceNorm3d', - 'LayerNorm', - 'GroupNorm', - 'SwitchNorm', ] -class LocalResponseNorm(Layer): - """The :class:`LocalResponseNorm` layer is for Local Response Normalization. - See ``tf.nn.local_response_normalization`` or ``tf.nn.lrn`` for new TF version. - The 4-D input tensor is a 3-D array of 1-D vectors (along the last dimension), and each vector is normalized independently. - Within a given vector, each component is divided by the weighted square-sum of inputs within depth_radius. - - Parameters - ----------- - depth_radius : int - Depth radius. 0-D. Half-width of the 1-D normalization window. - bias : float - An offset which is usually positive and shall avoid dividing by 0. - alpha : float - A scale factor which is usually positive. - beta : float - An exponent. - name : None or str - A unique layer name. - - """ - - def __init__( - self, - depth_radius=None, - bias=None, - alpha=None, - beta=None, - name=None, #'lrn', - ): - # super(LocalResponseNorm, self).__init__(prev_layer=prev_layer, name=name) - super().__init__(name) - self.depth_radius = depth_radius - self.bias = bias - self.alpha = alpha - self.beta = beta - - logging.info( - "LocalResponseNorm %s: depth_radius: %s, bias: %s, alpha: %s, beta: %s" % - (self.name, str(depth_radius), str(bias), str(alpha), str(beta)) - ) - - def build(self, inputs): - pass - - def forward(self, inputs): - """ - prev_layer : :class:`Layer` - The previous layer with a 4D output shape. 
-        """
-        outputs = tf.nn.lrn(inputs, depth_radius=self.depth_radius, bias=self.bias, alpha=self.alpha, beta=self.beta)
-        return outputs
-
-
-def _to_channel_first_bias(b):
-    """Reshape [c] to [c, 1, 1]."""
-    channel_size = int(b.shape[0])
-    new_shape = (channel_size, 1, 1)
-    # new_shape = [-1, 1, 1]  # doesn't work with tensorRT
-    return tf.reshape(b, new_shape)
-
-
-def _bias_scale(x, b, data_format):
-    """The multiplication counter part of tf.nn.bias_add."""
-    if data_format == 'NHWC':
-        return x * b
-    elif data_format == 'NCHW':
-        return x * b
-    else:
-        raise ValueError('invalid data_format: %s' % data_format)
-
-
-def _bias_add(x, b, data_format):
-    """Alternative implementation of tf.nn.bias_add which is compatiable with tensorRT."""
-    if data_format == 'NHWC':
-        return tf.add(x, b)
-    elif data_format == 'NCHW':
-        return tf.add(x, b)
-    else:
-        raise ValueError('invalid data_format: %s' % data_format)
-
-
-def _compute_shape(tensors):
-    if isinstance(tensors, list):
-        shape_mem = [t.get_shape().as_list() for t in tensors]
-    else:
-        shape_mem = tensors.get_shape().as_list()
-    return shape_mem
-
-
-def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, data_format, name=None):
-    """Data Format aware version of tf.nn.batch_normalization."""
-    if data_format == 'channels_last':
-        mean = tf.reshape(mean, [1] * (len(x.shape) - 1) + [-1])
-        variance = tf.reshape(variance, [1] * (len(x.shape) - 1) + [-1])
-        offset = tf.reshape(offset, [1] * (len(x.shape) - 1) + [-1])
-        scale = tf.reshape(scale, [1] * (len(x.shape) - 1) + [-1])
-    elif data_format == 'channels_first':
-        mean = tf.reshape(mean, [1] + [-1] + [1] * (len(x.shape) - 2))
-        variance = tf.reshape(variance, [1] + [-1] + [1] * (len(x.shape) - 2))
-        offset = tf.reshape(offset, [1] + [-1] + [1] * (len(x.shape) - 2))
-        scale = tf.reshape(scale, [1] + [-1] + [1] * (len(x.shape) - 2))
-    else:
-        raise ValueError('invalid data_format: %s' % data_format)
-
-    with ops.name_scope(name, 'batchnorm', [x, mean, variance, scale, offset]):
-        inv = math_ops.rsqrt(variance + variance_epsilon)
-        if scale is not None:
-            inv *= scale
-
-        a = math_ops.cast(inv, x.dtype)
-        b = math_ops.cast(offset - mean * inv if offset is not None else -mean * inv, x.dtype)
-
-        # Return a * x + b with customized data_format.
-        # Currently TF doesn't have bias_scale, and tensorRT has bug in converting tf.nn.bias_add
-        # So we reimplemted them to allow make the model work with tensorRT.
-        # See https://github.com/tensorlayer/openpose-plus/issues/75 for more details.
-        df = {'channels_first': 'NCHW', 'channels_last': 'NHWC'}
-        return _bias_add(_bias_scale(x, a, df[data_format]), b, df[data_format])
-
-
-class BatchNorm(Layer):
+class BatchNorm(Module):
     """
     The :class:`BatchNorm` is a batch normalization layer for both fully-connected and convolution outputs.
     See ``tf.nn.batch_normalization`` and ``tf.nn.moments``.
@@ -207,7 +74,7 @@ def __init__(
         decay=0.9,
         epsilon=0.00001,
         act=None,
-        is_train=False,
+        is_train=True,
         beta_init=tl.initializers.zeros(),
         gamma_init=tl.initializers.random_normal(mean=1.0, stddev=0.002),
         moving_mean_init=tl.initializers.zeros(),
@@ -225,10 +92,17 @@
         self.moving_mean_init = moving_mean_init
         self.moving_var_init = moving_var_init
         self.num_features = num_features
+        self.is_train = is_train
         self.axes = None

-        if num_features is not None:
-            self.build(None)
-            self._built = True
+        if self.num_features is None:
+            raise AttributeError(
+                "The registered layer `{}` should be built in advance. "
+                "Did you forget to pass the keyword argument 'num_features'?".format(self.name)
+            )
+
+        self.build(None)
+        self._built = True

@@ -236,12 +110,14 @@
             raise ValueError("decay should be between 0 to 1")

         logging.info(
-            "BatchNorm %s: decay: %f epsilon: %f act: %s is_train: %s" %
-            (self.name, decay, epsilon, self.act.__name__ if self.act is not None else 'No Activation', is_train)
+            "BatchNorm %s: decay: %f epsilon: %f act: %s is_train: %s" % (
+                self.name, decay, epsilon, self.act.__class__.__name__ if self.act is not None else 'No Activation',
+                is_train
+            )
         )

     def __repr__(self):
-        actstr = self.act.__name__ if self.act is not None else 'No Activation'
+        actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
         s = ('{classname}(num_features={num_features}, decay={decay}' ', epsilon={epsilon}')
         s += (', ' + actstr)
         if self.name is not None:
@@ -263,8 +139,7 @@ def _get_param_shape(self, inputs_shape):
         return params_shape

     def _check_input_shape(self, inputs):
-        inputs_shape = _compute_shape(inputs)
-        if len(inputs_shape) <= 1:
+        if inputs.ndim <= 1:
             raise ValueError('expected input at least 2D, but got {}D input'.format(inputs.ndim))

     def build(self, inputs_shape):
@@ -272,38 +147,31 @@
         self.beta, self.gamma = None, None

         if self.beta_init:
-            self.beta = self._get_weights("beta", shape=params_shape, init=self.beta_init)
+            self.beta = self._get_weights(var_name="beta", shape=params_shape, init=self.beta_init)

         if self.gamma_init:
-            self.gamma = self._get_weights("gamma", shape=params_shape, init=self.gamma_init)
+            self.gamma = self._get_weights(var_name="gamma", shape=params_shape, init=self.gamma_init)

         self.moving_mean = self._get_weights(
-            "moving_mean", shape=params_shape, init=self.moving_mean_init, trainable=False
+            var_name="moving_mean", shape=params_shape, init=self.moving_mean_init, trainable=False
         )
         self.moving_var = self._get_weights(
-            "moving_var", shape=params_shape, init=self.moving_var_init, trainable=False
+            var_name="moving_var", shape=params_shape, init=self.moving_var_init, trainable=False
         )

-    def forward(self, inputs):
-        self._check_input_shape(inputs)
-
-        self.channel_axis = len(inputs.shape) - 1 if self.data_format == 'channels_last' else 1
-        if self.axes is None:
-            self.axes = [i for i in range(len(inputs.shape)) if i != self.channel_axis]
+        self.batchnorm = tl.ops.BatchNorm(
+            decay=self.decay, epsilon=self.epsilon, beta=self.beta, gamma=self.gamma, moving_mean=self.moving_mean,
+            moving_var=self.moving_var, num_features=self.num_features, data_format=self.data_format,
+            is_train=self.is_train
+        )

-        mean, var = tf.nn.moments(inputs, self.axes, keepdims=False)
-        if self.is_train:
-            # update moving_mean and moving_var
-            self.moving_mean = moving_averages.assign_moving_average(
-                self.moving_mean, mean, self.decay, zero_debias=False
-            )
-            self.moving_var = moving_averages.assign_moving_average(self.moving_var, var, self.decay, zero_debias=False)
-            outputs = batch_normalization(inputs, mean, var, self.beta, self.gamma, self.epsilon, self.data_format)
-        else:
-            outputs = batch_normalization(
-                inputs, self.moving_mean, self.moving_var, self.beta, self.gamma, self.epsilon, self.data_format
-            )
+        self.act_init_flag = False
         if self.act:
+            self.act_init_flag = True
+
+    def forward(self, inputs):
+        outputs = self.batchnorm(inputs=inputs)
+        if self.act_init_flag:
             outputs = self.act(outputs)
         return outputs

@@ -327,8 +195,7 @@ class BatchNorm1d(BatchNorm):
     """

     def _check_input_shape(self, inputs):
-        inputs_shape = _compute_shape(inputs)
-        if len(inputs_shape) != 2 and
len(inputs_shape) != 3: + if inputs.ndim != 2 and inputs.ndim != 3: raise ValueError('expected input to be 2D or 3D, but got {}D input'.format(inputs.ndim)) @@ -351,8 +218,7 @@ class BatchNorm2d(BatchNorm): """ def _check_input_shape(self, inputs): - inputs_shape = _compute_shape(inputs) - if len(inputs_shape) != 4: + if inputs.ndim != 4: raise ValueError('expected input to be 4D, but got {}D input'.format(inputs.ndim)) @@ -375,505 +241,5 @@ class BatchNorm3d(BatchNorm): """ def _check_input_shape(self, inputs): - inputs_shape = _compute_shape(inputs) - if len(inputs_shape) != 5: + if inputs.ndim != 5: raise ValueError('expected input to be 5D, but got {}D input'.format(inputs.ndim)) - - -class InstanceNorm(Layer): - """ - The :class:`InstanceNorm` is an instance normalization layer for both fully-connected and convolution outputs. - See ``tf.nn.batch_normalization`` and ``tf.nn.moments``. - - Parameters - ----------- - act : activation function. - The activation function of this layer. - epsilon : float - Eplison. - beta_init : initializer or None - The initializer for initializing beta, if None, skip beta. - Usually you should not skip beta unless you know what happened. - gamma_init : initializer or None - The initializer for initializing gamma, if None, skip gamma. - When the instance normalization layer is use instead of 'biases', or the next layer is linear, this can be - disabled since the scaling can be done by the next layer. see `Inception-ResNet-v2 `__ - num_features: int - Number of features for input tensor. Useful to build layer if using InstanceNorm1d, InstanceNorm2d or InstanceNorm3d, - but should be left as None if using InstanceNorm. Default None. - data_format : str - channels_last 'channel_last' (default) or channels_first. - name : None or str - A unique layer name. - - - Examples - --------- - With TensorLayer - - >>> net = tl.layers.Input([None, 50, 50, 32], name='input') - >>> net = tl.layers.InstanceNorm()(net) - - Notes - ----- - The :class:`InstanceNorm` is universally suitable for 3D/4D/5D input in static model, but should not be used - in dynamic model where layer is built upon class initialization. So the argument 'num_features' should only be used - for subclasses :class:`InstanceNorm1d`, :class:`InstanceNorm2d` and :class:`InstanceNorm3d`. All the three subclasses are - suitable under all kinds of conditions. - """ - - def __init__( - self, act=None, epsilon=0.00001, beta_init=tl.initializers.zeros(), - gamma_init=tl.initializers.random_normal(mean=1.0, stddev=0.002), num_features=None, - data_format='channels_last', name=None - ): - super(InstanceNorm, self).__init__(name=name, act=act) - self.epsilon = epsilon - self.beta_init = beta_init - self.gamma_init = gamma_init - self.num_features = num_features - self.data_format = data_format - - if num_features is not None: - if not isinstance(self, InstanceNorm1d) and not isinstance(self, InstanceNorm2d) and not isinstance( - self, InstanceNorm3d): - raise ValueError( - "Please use InstanceNorm1d or InstanceNorm2d or InstanceNorm3d instead of InstanceNorm " - "if you want to specify 'num_features'." 
- ) - self.build(None) - self._built = True - - logging.info( - "InstanceNorm %s: epsilon: %f act: %s " % - (self.name, epsilon, self.act.__name__ if self.act is not None else 'No Activation') - ) - - def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' - s = '{classname}(num_features=num_features, epsilon={epsilon}' + actstr - if self.name is not None: - s += ', name="{name}"' - s += ')' - return s.format(classname=self.__class__.__name__, **self.__dict__) - - def _get_param_shape(self, inputs_shape): - if self.data_format == 'channels_last': - axis = len(inputs_shape) - 1 - elif self.data_format == 'channels_first': - axis = 1 - else: - raise ValueError('data_format should be either %s or %s' % ('channels_last', 'channels_first')) - - channels = inputs_shape[axis] - params_shape = [1] * len(inputs_shape) - params_shape[axis] = channels - - axes = [i for i in range(len(inputs_shape)) if i != 0 and i != axis] - return params_shape, axes - - def build(self, inputs_shape): - params_shape, self.axes = self._get_param_shape(inputs_shape) - - self.beta, self.gamma = None, None - if self.beta_init: - self.beta = self._get_weights("beta", shape=params_shape, init=self.beta_init) - - if self.gamma_init: - self.gamma = self._get_weights("gamma", shape=params_shape, init=self.gamma_init) - - def forward(self, inputs): - mean, var = tf.nn.moments(inputs, self.axes, keepdims=True) - outputs = batch_normalization(inputs, mean, var, self.beta, self.gamma, self.epsilon, self.data_format) - if self.act: - outputs = self.act(outputs) - return outputs - - -class InstanceNorm1d(InstanceNorm): - """The :class:`InstanceNorm1d` applies Instance Normalization over 3D input (a mini-instance of 1D - inputs with additional channel dimension), of shape (N, L, C) or (N, C, L). - See more details in :class:`InstanceNorm`. - - Examples - --------- - With TensorLayer - - >>> # in static model, no need to specify num_features - >>> net = tl.layers.Input([None, 50, 32], name='input') - >>> net = tl.layers.InstanceNorm1d()(net) - >>> # in dynamic model, build by specifying num_features - >>> conv = tl.layers.Conv1d(32, 5, 1, in_channels=3) - >>> bn = tl.layers.InstanceNorm1d(num_features=32) - - """ - - def _get_param_shape(self, inputs_shape): - if self.data_format == 'channels_last': - axis = 2 - elif self.data_format == 'channels_first': - axis = 1 - else: - raise ValueError('data_format should be either %s or %s' % ('channels_last', 'channels_first')) - - if self.num_features is None: - channels = inputs_shape[axis] - else: - channels = self.num_features - params_shape = [1] * 3 - params_shape[axis] = channels - - axes = [i for i in range(3) if i != 0 and i != axis] - return params_shape, axes - - -class InstanceNorm2d(InstanceNorm): - """The :class:`InstanceNorm2d` applies Instance Normalization over 4D input (a mini-instance of 2D - inputs with additional channel dimension) of shape (N, H, W, C) or (N, C, H, W). - See more details in :class:`InstanceNorm`. 
- - Examples - --------- - With TensorLayer - - >>> # in static model, no need to specify num_features - >>> net = tl.layers.Input([None, 50, 50, 32], name='input') - >>> net = tl.layers.InstanceNorm2d()(net) - >>> # in dynamic model, build by specifying num_features - >>> conv = tl.layers.Conv2d(32, (5, 5), (1, 1), in_channels=3) - >>> bn = tl.layers.InstanceNorm2d(num_features=32) - - """ - - def _get_param_shape(self, inputs_shape): - if self.data_format == 'channels_last': - axis = 3 - elif self.data_format == 'channels_first': - axis = 1 - else: - raise ValueError('data_format should be either %s or %s' % ('channels_last', 'channels_first')) - - if self.num_features is None: - channels = inputs_shape[axis] - else: - channels = self.num_features - params_shape = [1] * 4 - params_shape[axis] = channels - - axes = [i for i in range(4) if i != 0 and i != axis] - return params_shape, axes - - -class InstanceNorm3d(InstanceNorm): - """The :class:`InstanceNorm3d` applies Instance Normalization over 5D input (a mini-instance of 3D - inputs with additional channel dimension) with shape (N, D, H, W, C) or (N, C, D, H, W). - See more details in :class:`InstanceNorm`. - - Examples - --------- - With TensorLayer - - >>> # in static model, no need to specify num_features - >>> net = tl.layers.Input([None, 50, 50, 50, 32], name='input') - >>> net = tl.layers.InstanceNorm3d()(net) - >>> # in dynamic model, build by specifying num_features - >>> conv = tl.layers.Conv3d(32, (5, 5, 5), (1, 1), in_channels=3) - >>> bn = tl.layers.InstanceNorm3d(num_features=32) - - """ - - def _get_param_shape(self, inputs_shape): - if self.data_format == 'channels_last': - axis = 4 - elif self.data_format == 'channels_first': - axis = 1 - else: - raise ValueError('data_format should be either %s or %s' % ('channels_last', 'channels_first')) - - if self.num_features is None: - channels = inputs_shape[axis] - else: - channels = self.num_features - params_shape = [1] * 5 - params_shape[axis] = channels - - axes = [i for i in range(5) if i != 0 and i != axis] - return params_shape, axes - - -# FIXME : not sure about the correctness, need testing -class LayerNorm(Layer): - """ - The :class:`LayerNorm` class is for layer normalization, see `tf.contrib.layers.layer_norm `__. - - Parameters - ---------- - prev_layer : :class:`Layer` - The previous layer. - act : activation function - The activation function of this layer. - others : _ - `tf.contrib.layers.layer_norm `__. 
- - """ - - def __init__( - self, #prev_layer, - center=True, - scale=True, - act=None, - # reuse=None, - # variables_collections=None, - # outputs_collections=None, - # trainable=True, - epsilon=1e-12, - begin_norm_axis=1, - begin_params_axis=-1, - beta_init=tl.initializers.zeros(), - gamma_init=tl.initializers.ones(), - data_format='channels_last', - name=None, - ): - - # super(LayerNorm, self).__init__(prev_layer=prev_layer, act=act, name=name) - super(LayerNorm, self).__init__(name, act=act) - self.center = center - self.scale = scale - self.epsilon = epsilon - self.begin_norm_axis = begin_norm_axis - self.begin_params_axis = begin_params_axis - self.beta_init = beta_init - self.gamma_init = gamma_init - self.data_format = data_format - - logging.info( - "LayerNorm %s: act: %s" % (self.name, self.act.__name__ if self.act is not None else 'No Activation') - ) - - def build(self, inputs_shape): - params_shape = inputs_shape[self.begin_params_axis:] - self.beta, self.gamma = None, None - if self.center: - self.beta = self._get_weights("beta", shape=params_shape, init=self.beta_init) - if self.scale: - self.gamma = self._get_weights("gamma", shape=params_shape, init=self.gamma_init) - - self.norm_axes = range(self.begin_norm_axis, len(inputs_shape)) - - def forward(self, inputs): - mean, var = tf.nn.moments(inputs, self.norm_axes, keepdims=True) - # compute layer normalization using batch_normalization function - outputs = batch_normalization( - inputs, mean, var, self.beta, self.gamma, self.epsilon, data_format=self.data_format - ) - if self.act: - outputs = self.act(outputs) - return outputs - - # with tf.compat.v1.variable_scope(name) as vs: - # self.outputs = tf.contrib.layers.layer_norm( - # self.inputs, - # center=center, - # scale=scale, - # activation_fn=self.act, - # reuse=reuse, - # variables_collections=variables_collections, - # outputs_collections=outputs_collections, - # trainable=trainable, - # begin_norm_axis=begin_norm_axis, - # begin_params_axis=begin_params_axis, - # scope='var', - # ) - # - # variables = tf.compat.v1.get_collection("TF_GRAPHKEYS_VARIABLES", scope=vs.name) - # - # self._add_layers(self.outputs) - # self._add_params(variables) - - -class GroupNorm(Layer): - """The :class:`GroupNorm` layer is for Group Normalization. - See `tf.contrib.layers.group_norm `__. - - Parameters - ----------- - # prev_layer : :class:`Layer` - # The previous layer. - groups : int - The number of groups - act : activation function - The activation function of this layer. - epsilon : float - Eplison. - data_format : str - channels_last 'channel_last' (default) or channels_first. 
- name : None or str - A unique layer name - - """ - - def __init__(self, groups=32, epsilon=1e-06, act=None, data_format='channels_last', name=None): #'groupnorm'): - # super(GroupNorm, self).__init__(prev_layer=prev_layer, act=act, name=name) - super().__init__(name, act=act) - self.groups = groups - self.epsilon = epsilon - self.data_format = data_format - - logging.info( - "GroupNorm %s: act: %s" % (self.name, self.act.__name__ if self.act is not None else 'No Activation') - ) - - def build(self, inputs_shape): - # shape = inputs.get_shape().as_list() - if len(inputs_shape) != 4: - raise Exception("This GroupNorm only supports 2D images.") - - if self.data_format == 'channels_last': - channels = inputs_shape[-1] - self.int_shape = tf.concat( - [#tf.shape(input=self.inputs)[0:3], - inputs_shape[0:3], - tf.convert_to_tensor(value=[self.groups, channels // self.groups])], axis=0 - ) - elif self.data_format == 'channels_first': - channels = inputs_shape[1] - self.int_shape = tf.concat( - [ - # tf.shape(input=self.inputs)[0:1], - inputs_shape[0:1], - tf.convert_to_tensor(value=[self.groups, channels // self.groups]), - # tf.shape(input=self.inputs)[2:4] - inputs_shape[2:4], - ], - axis=0 - ) - else: - raise ValueError("data_format must be 'channels_last' or 'channels_first'.") - - if self.groups > channels: - raise ValueError('Invalid groups %d for %d channels.' % (self.groups, channels)) - if channels % self.groups != 0: - raise ValueError('%d channels is not commensurate with %d groups.' % (channels, self.groups)) - - if self.data_format == 'channels_last': - # mean, var = tf.nn.moments(x, [1, 2, 4], keep_dims=True) - self.gamma = self._get_weights("gamma", shape=channels, init=tl.initializers.ones()) - # self.gamma = tf.compat.v1.get_variable('gamma', channels, initializer=tf.compat.v1.initializers.ones()) - self.beta = self._get_weights("beta", shape=channels, init=tl.initializers.zeros()) - # self.beta = tf.compat.v1.get_variable('beta', channels, initializer=tf.compat.v1.initializers.zeros()) - elif self.data_format == 'channels_first': - # mean, var = tf.nn.moments(x, [2, 3, 4], keep_dims=True) - self.gamma = self._get_weights("gamma", shape=[1, channels, 1, 1], init=tl.initializers.ones()) - # self.gamma = tf.compat.v1.get_variable('gamma', [1, channels, 1, 1], initializer=tf.compat.v1.initializers.ones()) - self.beta = self._get_weights("beta", shape=[1, channels, 1, 1], init=tl.initializers.zeros()) - # self.beta = tf.compat.v1.get_variable('beta', [1, channels, 1, 1], initializer=tf.compat.v1.initializers.zeros()) - # self.add_weights([self.gamma, self.bata]) - - def forward(self, inputs): - x = tf.reshape(inputs, self.int_shape) - if self.data_format == 'channels_last': - mean, var = tf.nn.moments(x=x, axes=[1, 2, 4], keepdims=True) - elif self.data_format == 'channels_first': - mean, var = tf.nn.moments(x=x, axes=[2, 3, 4], keepdims=True) - else: - raise Exception("unknown data_format") - x = (x - mean) / tf.sqrt(var + self.epsilon) - - outputs = tf.reshape(x, tf.shape(input=inputs)) * self.gamma + self.beta - if self.act: - outputs = self.act(outputs) - return outputs - - -class SwitchNorm(Layer): - """ - The :class:`SwitchNorm` is a switchable normalization. - - Parameters - ---------- - act : activation function - The activation function of this layer. - epsilon : float - Eplison. - beta_init : initializer or None - The initializer for initializing beta, if None, skip beta. - Usually you should not skip beta unless you know what happened. 
- gamma_init : initializer or None - The initializer for initializing gamma, if None, skip gamma. - When the batch normalization layer is use instead of 'biases', or the next layer is linear, this can be - disabled since the scaling can be done by the next layer. see `Inception-ResNet-v2 `__ - moving_mean_init : initializer or None - The initializer for initializing moving mean, if None, skip moving mean. - data_format : str - channels_last 'channel_last' (default) or channels_first. - name : None or str - A unique layer name. - - References - ---------- - - `Differentiable Learning-to-Normalize via Switchable Normalization `__ - - `Zhihu (CN) `__ - - """ - - def __init__( - self, - act=None, - epsilon=1e-5, - beta_init=tl.initializers.constant(0.0), - gamma_init=tl.initializers.constant(1.0), - moving_mean_init=tl.initializers.zeros(), - # beta_init=tf.compat.v1.initializers.constant(0.0), - # gamma_init=tf.compat.v1.initializers.constant(1.0), - # moving_mean_init=tf.compat.v1.initializers.zeros(), - data_format='channels_last', - name=None, #'switchnorm', - ): - # super(SwitchNorm, self).__init__(prev_layer=prev_layer, act=act, name=name) - super().__init__(name, act=act) - self.epsilon = epsilon - self.beta_init = beta_init - self.gamma_init = gamma_init - self.moving_mean_init = moving_mean_init - self.data_format = data_format - - logging.info( - "SwitchNorm %s: epsilon: %f act: %s" % - (self.name, epsilon, self.act.__name__ if self.act is not None else 'No Activation') - ) - - def build(self, inputs_shape): - if len(inputs_shape) != 4: - raise Exception("This SwitchNorm only supports 2D images.") - if self.data_format != 'channels_last': - raise Exception("This SwitchNorm only supports channels_last.") - ch = inputs_shape[-1] - self.gamma = self._get_weights("gamma", shape=[ch], init=self.gamma_init) - # self.gamma = tf.compat.v1.get_variable("gamma", [ch], initializer=gamma_init) - self.beta = self._get_weights("beta", shape=[ch], init=self.beta_init) - # self.beta = tf.compat.v1.get_variable("beta", [ch], initializer=beta_init) - - self.mean_weight_var = self._get_weights("mean_weight", shape=[3], init=tl.initializers.constant(1.0)) - # self.mean_weight_var = tf.compat.v1.get_variable("mean_weight", [3], initializer=tf.compat.v1.initializers.constant(1.0)) - self.var_weight_var = self._get_weights("var_weight", shape=[3], init=tl.initializers.constant(1.0)) - # self.var_weight_var = tf.compat.v1.get_variable("var_weight", [3], initializer=tf.compat.v1.initializers.constant(1.0)) - - # self.add_weights([self.gamma, self.beta, self.mean_weight_var, self.var_weight_var]) - - def forward(self, inputs): - - batch_mean, batch_var = tf.nn.moments(x=inputs, axes=[0, 1, 2], keepdims=True) - ins_mean, ins_var = tf.nn.moments(x=inputs, axes=[1, 2], keepdims=True) - layer_mean, layer_var = tf.nn.moments(x=inputs, axes=[1, 2, 3], keepdims=True) - - mean_weight = tf.nn.softmax(self.mean_weight_var) - var_weight = tf.nn.softmax(self.var_weight_var) - - mean = mean_weight[0] * batch_mean + mean_weight[1] * ins_mean + mean_weight[2] * layer_mean - var = var_weight[0] * batch_var + var_weight[1] * ins_var + var_weight[2] * layer_var - - inputs = (inputs - mean) / (tf.sqrt(var + self.epsilon)) - outputs = inputs * self.gamma + self.beta - if self.act: - outputs = self.act(outputs) - return outputs diff --git a/tensorlayer/layers/padding.py b/tensorlayer/layers/padding.py index ae89035bc..138f81a42 100644 --- a/tensorlayer/layers/padding.py +++ b/tensorlayer/layers/padding.py @@ -1,12 +1,9 @@ #! 
/usr/bin/python # -*- coding: utf-8 -*- -import tensorflow as tf - import tensorlayer as tl from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer +from tensorlayer.layers.core import Module __all__ = [ 'PadLayer', @@ -16,7 +13,7 @@ ] -class PadLayer(Layer): +class PadLayer(Module): """The :class:`PadLayer` class is a padding layer for any mode and dimension. Please see `tf.pad `__ for usage. @@ -36,7 +33,7 @@ class PadLayer(Layer): >>> net = tl.layers.Input([None, 224, 224, 3], name='input') >>> padlayer = tl.layers.PadLayer([[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT", name='inpad')(net) >>> print(padlayer) - >>> output shape : (None, 106, 106, 3) + >>> output shape : (None, 230, 230, 3) """ @@ -71,11 +68,11 @@ def build(self, inputs_shape=None): pass def forward(self, inputs): - outputs = tf.pad(tensor=inputs, paddings=self.padding, mode=self.mode, name=self.name) + outputs = tl.ops.pad(tensor=inputs, paddings=self.padding, mode=self.mode) return outputs -class ZeroPad1d(Layer): +class ZeroPad1d(Module): """ The :class:`ZeroPad1d` class is a 1D padding layer for signal [batch, length, channel]. @@ -92,7 +89,7 @@ class ZeroPad1d(Layer): With TensorLayer >>> net = tl.layers.Input([None, 100, 1], name='input') - >>> pad1d = tl.layers.ZeroPad1d(padding=(2, 3))(net) + >>> pad1d = tl.layers.ZeroPad1d(padding=(3, 3))(net) >>> print(pad1d) >>> output shape : (None, 106, 1) @@ -121,14 +118,14 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape=None): - self.layer = tf.keras.layers.ZeroPadding1D(padding=self.padding, name=self.name) + self.layer = tl.ops.ZeroPadding1D(padding=self.padding) def forward(self, inputs): outputs = self.layer(inputs) return outputs -class ZeroPad2d(Layer): +class ZeroPad2d(Module): """ The :class:`ZeroPad2d` class is a 2D padding layer for image [batch, height, width, channel]. @@ -176,14 +173,14 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape=None): - self.layer = tf.keras.layers.ZeroPadding2D(padding=self.padding, name=self.name) + self.layer = tl.ops.ZeroPadding2D(padding=self.padding) def forward(self, inputs): outputs = self.layer(inputs) return outputs -class ZeroPad3d(Layer): +class ZeroPad3d(Module): """ The :class:`ZeroPad3d` class is a 3D padding layer for volume [batch, depth, height, width, channel]. @@ -231,7 +228,7 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape=None): - self.layer = tf.keras.layers.ZeroPadding3D(padding=self.padding, name=self.name) + self.layer = tl.ops.ZeroPadding3D(padding=self.padding) def forward(self, inputs): outputs = self.layer(inputs) diff --git a/tensorlayer/layers/pooling.py b/tensorlayer/layers/pooling.py index d9deedecd..537b64c0b 100644 --- a/tensorlayer/layers/pooling.py +++ b/tensorlayer/layers/pooling.py @@ -1,12 +1,11 @@ #! 
/usr/bin/python # -*- coding: utf-8 -*- -import tensorflow as tf - import tensorlayer as tl from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer +from tensorlayer.layers.core import Module + +# TODO ADD INPUT CHECK __all__ = [ 'PoolLayer', @@ -22,15 +21,15 @@ 'GlobalMeanPool2d', 'GlobalMaxPool3d', 'GlobalMeanPool3d', - 'CornerPool2d', + # 'CornerPool2d', ] -class PoolLayer(Layer): +class PoolLayer(Module): """ The :class:`PoolLayer` class is a Pooling layer. - You can choose ``tf.nn.max_pool`` and ``tf.nn.avg_pool`` for 2D input or - ``tf.nn.max_pool3d`` and ``tf.nn.avg_pool3d`` for 3D input. + You can choose ``tl.ops.max_pool`` and ``tl.ops.avg_pool`` for 2D input or + ``tl.ops.max_pool3d`` and ``tl.ops.avg_pool3d`` for 3D input. Parameters ---------- @@ -43,7 +42,7 @@ class PoolLayer(Layer): padding : str The padding algorithm type: "SAME" or "VALID". pool : pooling function - One of ``tf.nn.max_pool``, ``tf.nn.avg_pool``, ``tf.nn.max_pool3d`` and ``f.nn.avg_pool3d``. + One of ``tl.ops.max_pool``, ``tl.ops.avg_pool``, ``tl.ops.max_pool3d`` and ``f.ops.avg_pool3d``. See `TensorFlow pooling APIs `__ name : None or str A unique layer name. @@ -63,7 +62,7 @@ def __init__( filter_size=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME', - pool=tf.nn.max_pool, + pool=tl.ops.MaxPool, name=None # 'pool_pro', ): super().__init__(name) @@ -88,14 +87,14 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, poolname=self.pool.__name__, **self.__dict__) def build(self, inputs_shape=None): - pass + self._pool = self.pool(ksize=self.filter_size, strides=self.strides, padding=self.padding) def forward(self, inputs): - outputs = self.pool(inputs, ksize=self.filter_size, strides=self.strides, padding=self.padding, name=self.name) + outputs = self._pool(inputs) return outputs -class MaxPool1d(Layer): +class MaxPool1d(Module): """Max pooling for 1D signal. Parameters @@ -167,7 +166,7 @@ def build(self, inputs_shape=None): self._dilation_rate = [self.dilation_rate] def forward(self, inputs): - outputs = tf.nn.pool( + outputs = tl.ops.pool( input=inputs, window_shape=self._filter_size, pooling_type="MAX", @@ -175,12 +174,11 @@ def forward(self, inputs): padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate, - name=self.name, ) return outputs -class MeanPool1d(Layer): +class MeanPool1d(Module): """Mean pooling for 1D signal. Parameters @@ -240,7 +238,6 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape=None): - # pass # https://tensorflow.google.cn/versions/r2.0/api_docs/python/tf/nn/pool if self.data_format == 'channels_last': self.data_format = 'NWC' @@ -253,20 +250,14 @@ def build(self, inputs_shape=None): self._dilation_rate = [self.dilation_rate] def forward(self, inputs): - outputs = tf.nn.pool( - input=inputs, - window_shape=self._filter_size, - pooling_type="AVG", - padding=self.padding, - dilations=None, # TODO: support dilations - strides=self._strides, - name=self.name, - data_format=self.data_format + outputs = tl.ops.pool( + input=inputs, window_shape=self._filter_size, pooling_type="AVG", padding=self.padding, + dilations=self._dilation_rate, strides=self._strides, data_format=self.data_format ) return outputs -class MaxPool2d(Layer): +class MaxPool2d(Module): """Max pooling for 2D image. 
     Parameters
@@ -325,23 +316,24 @@ def __repr__(self):

     def build(self, inputs_shape=None):
         if self.data_format == 'channels_last':
-            self._strides = [1, self.strides[0], self.strides[1], 1]
             self.data_format = 'NHWC'
+            self._strides = [1, self.strides[0], self.strides[1], 1]
         elif self.data_format == 'channels_first':
             self.data_format = 'NCHW'
             self._strides = [1, 1, self.strides[0], self.strides[1]]
         else:
             raise Exception("unsupported data format")

-    def forward(self, inputs):
-        outputs = tf.nn.max_pool(
-            input=inputs, ksize=self.filter_size, strides=self._strides, padding=self.padding, name=self.name,
-            data_format=self.data_format
+        self.max_pool = tl.ops.MaxPool(
+            ksize=self.filter_size, strides=self._strides, padding=self.padding, data_format=self.data_format
         )
+
+    def forward(self, inputs):
+        outputs = self.max_pool(inputs)
         return outputs


-class MeanPool2d(Layer):
+class MeanPool2d(Module):
     """Mean pooling for 2D image [batch, height, width, channel].

     Parameters
@@ -407,16 +399,16 @@ def build(self, inputs_shape=None):
             self._strides = [1, 1, self.strides[0], self.strides[1]]
         else:
             raise Exception("unsupported data format")
+        self.avg_pool = tl.ops.AvgPool(
+            ksize=self.filter_size, strides=self._strides, padding=self.padding, data_format=self.data_format
+        )

     def forward(self, inputs):
-        outputs = tf.nn.avg_pool(
-            input=inputs, ksize=self.filter_size, strides=self._strides, padding=self.padding, name=self.name,
-            data_format=self.data_format
-        )
+        outputs = self.avg_pool(inputs)
         return outputs


-class MaxPool3d(Layer):
+class MaxPool3d(Module):
     """Max pooling for 3D volume.

     Parameters
@@ -487,18 +479,17 @@ def build(self, inputs_shape=None):
             raise Exception("unsupported data format")

     def forward(self, inputs):
-        outputs = tf.nn.max_pool3d(
+        outputs = tl.ops.max_pool3d(
             input=inputs,
             ksize=self.filter_size,
             strides=self._strides,
             padding=self.padding,
             data_format=self.data_format,
-            name=self.name,
         )
         return outputs


-class MeanPool3d(Layer):
+class MeanPool3d(Module):
     """Mean pooling for 3D volume.

     Parameters
@@ -559,28 +550,26 @@ def __repr__(self):
         return s.format(classname=self.__class__.__name__, **self.__dict__)

     def build(self, inputs_shape=None):
         if self.data_format == 'channels_last':
             self.data_format = 'NDHWC'
             self._strides = [1, self.strides[0], self.strides[1], self.strides[2], 1]
         elif self.data_format == 'channels_first':
             self.data_format = 'NCDHW'
             self._strides = [1, 1, self.strides[0], self.strides[1], self.strides[2]]
         else:
             raise Exception("unsupported data format")

     def forward(self, inputs):
-        outputs = tf.nn.avg_pool3d(
+        outputs = tl.ops.avg_pool3d(
             input=inputs,
             ksize=self.filter_size,
             strides=self._strides,
             padding=self.padding,
             data_format=self.data_format,
-            name=self.name,
         )
         return outputs


-class GlobalMaxPool1d(Layer):
+class GlobalMaxPool1d(Module):
    """The :class:`GlobalMaxPool1d` class is a 1D Global Max Pooling layer.
Parameters @@ -622,21 +611,21 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape=None): - pass - - def forward(self, inputs): if self.data_format == 'channels_last': - outputs = tf.reduce_max(input_tensor=inputs, axis=1, name=self.name) + self.reduce_max = tl.ReduceMax(axis=1) elif self.data_format == 'channels_first': - outputs = tf.reduce_max(input_tensor=inputs, axis=2, name=self.name) + self.reduce_max = tl.ReduceMax(axis=2) else: raise ValueError( "`data_format` should have one of the following values: [`channels_last`, `channels_first`]" ) + + def forward(self, inputs): + outputs = self.reduce_max(inputs) return outputs -class GlobalMeanPool1d(Layer): +class GlobalMeanPool1d(Module): """The :class:`GlobalMeanPool1d` class is a 1D Global Mean Pooling layer. Parameters @@ -677,21 +666,21 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape=None): - pass - - def forward(self, inputs): if self.data_format == 'channels_last': - outputs = tf.reduce_mean(input_tensor=inputs, axis=1, name=self.name) + self.reduce_mean = tl.ReduceMean(axis=1) elif self.data_format == 'channels_first': - outputs = tf.reduce_mean(input_tensor=inputs, axis=2, name=self.name) + self.reduce_mean = tl.ReduceMean(axis=2) else: raise ValueError( "`data_format` should have one of the following values: [`channels_last`, `channels_first`]" ) + + def forward(self, inputs): + outputs = self.reduce_mean(inputs) return outputs -class GlobalMaxPool2d(Layer): +class GlobalMaxPool2d(Module): """The :class:`GlobalMaxPool2d` class is a 2D Global Max Pooling layer. Parameters @@ -732,21 +721,21 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape=None): - pass - - def forward(self, inputs): if self.data_format == 'channels_last': - outputs = tf.reduce_max(input_tensor=inputs, axis=[1, 2], name=self.name) + self.reduce_max = tl.ReduceMax(axis=[1, 2]) elif self.data_format == 'channels_first': - outputs = tf.reduce_max(input_tensor=inputs, axis=[2, 3], name=self.name) + self.reduce_max = tl.ReduceMax(axis=[2, 3]) else: raise ValueError( "`data_format` should have one of the following values: [`channels_last`, `channels_first`]" ) + + def forward(self, inputs): + outputs = self.reduce_max(inputs) return outputs -class GlobalMeanPool2d(Layer): +class GlobalMeanPool2d(Module): """The :class:`GlobalMeanPool2d` class is a 2D Global Mean Pooling layer. Parameters @@ -788,21 +777,21 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape=None): - pass - - def forward(self, inputs): if self.data_format == 'channels_last': - outputs = tf.reduce_mean(input_tensor=inputs, axis=[1, 2], name=self.name) + self.reduce_mean = tl.ReduceMean(axis=[1, 2]) elif self.data_format == 'channels_first': - outputs = tf.reduce_mean(input_tensor=inputs, axis=[2, 3], name=self.name) + self.reduce_mean = tl.ReduceMean(axis=[2, 3]) else: raise ValueError( "`data_format` should have one of the following values: [`channels_last`, `channels_first`]" ) + + def forward(self, inputs): + outputs = self.reduce_mean(inputs) return outputs -class GlobalMaxPool3d(Layer): +class GlobalMaxPool3d(Module): """The :class:`GlobalMaxPool3d` class is a 3D Global Max Pooling layer. 
Parameters @@ -844,21 +833,21 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape=None): - pass - - def forward(self, inputs): if self.data_format == 'channels_last': - outputs = tf.reduce_max(input_tensor=inputs, axis=[1, 2, 3], name=self.name) + self.reduce_max = tl.ReduceMax(axis=[1, 2, 3]) elif self.data_format == 'channels_first': - outputs = tf.reduce_max(input_tensor=inputs, axis=[2, 3, 4], name=self.name) + self.reduce_max = tl.ReduceMax(axis=[2, 3, 4]) else: raise ValueError( "`data_format` should have one of the following values: [`channels_last`, `channels_first`]" ) + + def forward(self, inputs): + outputs = self.reduce_max(inputs) return outputs -class GlobalMeanPool3d(Layer): +class GlobalMeanPool3d(Module): """The :class:`GlobalMeanPool3d` class is a 3D Global Mean Pooling layer. Parameters @@ -903,9 +892,9 @@ def build(self, inputs_shape=None): def forward(self, inputs): if self.data_format == 'channels_last': - outputs = tf.reduce_mean(input_tensor=inputs, axis=[1, 2, 3], name=self.name) + outputs = tl.reduce_mean(input_tensor=inputs, axis=[1, 2, 3]) elif self.data_format == 'channels_first': - outputs = tf.reduce_mean(input_tensor=inputs, axis=[2, 3, 4], name=self.name) + outputs = tl.reduce_mean(input_tensor=inputs, axis=[2, 3, 4]) else: raise ValueError( "`data_format` should have one of the following values: [`channels_last`, `channels_first`]" @@ -913,73 +902,73 @@ def forward(self, inputs): return outputs -class CornerPool2d(Layer): - """Corner pooling for 2D image [batch, height, width, channel], see `here `__. - - Parameters - ---------- - mode : str - TopLeft for the top left corner, - Bottomright for the bottom right corner. - name : None or str - A unique layer name. 
-
-    Examples
-    ---------
-    With TensorLayer
-
-    >>> net = tl.layers.Input([None, 32, 32, 8], name='input')
-    >>> net = tl.layers.CornerPool2d(mode='TopLeft',name='cornerpool2d')(net)
-    >>> output shape : [None, 32, 32, 8]
-
-    """
-
-    def __init__(
-        self,
-        mode='TopLeft',
-        name=None  # 'cornerpool2d'
-    ):
-        super().__init__(name)
-        self.mode = mode
-        self.build()
-        self._built = True
-
-        logging.info("CornerPool2d %s : mode: %s" % (self.name, str(mode)))
-
-    def __repr__(self):
-        s = ('{classname}(mode={mode}')
-        if self.name is not None:
-            s += ', name=\'{name}\''
-        s += ')'
-        return s.format(classname=self.__class__.__name__, **self.__dict__)
-
-    def build(self, inputs_shape=None):
-        pass
-
-    def forward(self, inputs):
-        input_width = inputs.shape[2]
-        input_height = inputs.shape[1]
-        batch_min = tf.reduce_min(inputs)
-        if self.mode == 'TopLeft':
-            temp_bottom = tf.pad(
-                inputs, tf.constant([[0, 0], [0, input_height - 1], [0, 0], [0, 0]]), constant_values=batch_min
-            )
-            temp_right = tf.pad(
-                inputs, tf.constant([[0, 0], [0, 0], [0, input_width - 1], [0, 0]]), constant_values=batch_min
-            )
-            temp_bottom = tf.nn.max_pool(temp_bottom, ksize=(input_height, 1), strides=(1, 1), padding='VALID')
-            temp_right = tf.nn.max_pool(temp_right, ksize=(1, input_width), strides=(1, 1), padding='VALID')
-            outputs = tf.add(temp_bottom, temp_right, name=self.name)
-        elif self.mode == 'BottomRight':
-            temp_top = tf.pad(
-                inputs, tf.constant([[0, 0], [input_height - 1, 0], [0, 0], [0, 0]]), constant_values=batch_min
-            )
-            temp_left = tf.pad(
-                inputs, tf.constant([[0, 0], [0, 0], [input_width - 1, 0], [0, 0]]), constant_values=batch_min
-            )
-            temp_top = tf.nn.max_pool(temp_top, ksize=(input_height, 1), strides=(1, 1), padding='VALID')
-            temp_left = tf.nn.max_pool(temp_left, ksize=(1, input_width), strides=(1, 1), padding='VALID')
-            outputs = tf.add(temp_top, temp_left, name=self.name)
-        else:
-            outputs = tf.identity(inputs, name=self.name)
-        return outputs
+# class CornerPool2d(Layer):
+#     """Corner pooling for 2D image [batch, height, width, channel], see `here `__.
+#
+#     Parameters
+#     ----------
+#     mode : str
+#         TopLeft for the top left corner,
+#         BottomRight for the bottom right corner.
+#     name : None or str
+#         A unique layer name.
+# +# Examples +# --------- +# With TensorLayer +# +# >>> net = tl.layers.Input([None, 32, 32, 8], name='input') +# >>> net = tl.layers.CornerPool2d(mode='TopLeft',name='cornerpool2d')(net) +# >>> output shape : [None, 32, 32, 8] +# +# """ +# +# def __init__( +# self, +# mode='TopLeft', +# name=None # 'cornerpool2d' +# ): +# super().__init__(name) +# self.mode = mode +# self.build() +# self._built = True +# +# logging.info("CornerPool2d %s : mode: %s" % (self.name, str(mode))) +# +# def __repr__(self): +# s = ('{classname}(mode={mode}') +# if self.name is not None: +# s += ', name=\'{name}\'' +# s += ')' +# return s.format(classname=self.__class__.__name__, **self.__dict__) +# +# def build(self, inputs_shape=None): +# pass +# +# def forward(self, inputs): +# input_width = inputs.shape[2] +# input_height = inputs.shape[1] +# batch_min = tl.reduce_min(inputs) +# if self.mode == 'TopLeft': +# temp_bottom = tl.pad( +# inputs, tl.constant([[0, 0], [0, input_height - 1], [0, 0], [0, 0]]), constant_values=batch_min +# ) +# temp_right = tl.pad( +# inputs, tl.constant([[0, 0], [0, 0], [0, input_width - 1], [0, 0]]), constant_values=batch_min +# ) +# temp_bottom = tl.ops.max_pool(temp_bottom, ksize=(input_height, 1), strides=(1, 1), padding='VALID') +# temp_right = tl.ops.max_pool(temp_right, ksize=(1, input_width), strides=(1, 1), padding='VALID') +# outputs = tl.add(temp_bottom, temp_right)#, name=self.name) +# elif self.mode == 'BottomRight': +# temp_top = tl.pad( +# inputs, tl.constant([[0, 0], [input_height - 1, 0], [0, 0], [0, 0]]), constant_values=batch_min +# ) +# temp_left = tl.pad( +# inputs, tl.constant([[0, 0], [0, 0], [input_width - 1, 0], [0, 0]]), constant_values=batch_min +# ) +# temp_top = tl.ops.max_pool(temp_top, ksize=(input_height, 1), strides=(1, 1), padding='VALID') +# temp_left = tl.ops.max_pool(temp_left, ksize=(1, input_width), strides=(1, 1), padding='VALID') +# outputs = tl.add(temp_top, temp_left, name=self.name) +# else: +# outputs = tl.identity(inputs, name=self.name) +# return outputs diff --git a/tensorlayer/layers/quantize.py b/tensorlayer/layers/quantize.py index fd19c9fa4..1a64f63ad 100644 --- a/tensorlayer/layers/quantize.py +++ b/tensorlayer/layers/quantize.py @@ -1,11 +1,11 @@ #! /usr/bin/python # -*- coding: utf-8 -*- +import os +os.environ['TL_BACKEND'] = 'tensorflow' -import tensorflow as tf - +import tensorlayer as tl from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer +from tensorlayer.layers.core import Module from tensorlayer.layers.utils import quantize __all__ = [ @@ -13,7 +13,7 @@ ] -class Sign(Layer): +class Sign(Module): """The :class:`SignLayer` class is for quantizing the layer outputs to -1 or 1 while inferencing. Parameters @@ -45,8 +45,5 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def forward(self, inputs): - # with tf.variable_scope(name): - ## self.outputs = tl.act.sign(self.inputs) - # self.outputs = quantize(self.inputs) outputs = quantize(inputs) return outputs diff --git a/tensorlayer/layers/recurrent.py b/tensorlayer/layers/recurrent.py deleted file mode 100644 index 2d3558af4..000000000 --- a/tensorlayer/layers/recurrent.py +++ /dev/null @@ -1,1265 +0,0 @@ -#! 
/usr/bin/python -# -*- coding: utf-8 -*- - -import numpy as np -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer - -# TODO: uncomment -__all__ = [ - 'RNN', - 'SimpleRNN', - 'GRURNN', - 'LSTMRNN', - 'BiRNN', - # 'ConvRNNCell', - # 'BasicConvLSTMCell', - # 'ConvLSTM', - 'retrieve_seq_length_op', - 'retrieve_seq_length_op2', - 'retrieve_seq_length_op3', - 'target_mask_op', -] - - -class RNN(Layer): - """ - The :class:`RNN` class is a fixed length recurrent layer for implementing simple RNN, - LSTM, GRU and etc. - - Parameters - ---------- - cell : TensorFlow cell function - A RNN cell implemented by tf.keras - - E.g. tf.keras.layers.SimpleRNNCell, tf.keras.layers.LSTMCell, tf.keras.layers.GRUCell - - Note TF2.0+, TF1.0+ and TF1.0- are different - - return_last_output : boolean - Whether return last output or all outputs in a sequence. - - - If True, return the last output, "Sequence input and single output" - - If False, return all outputs, "Synced sequence input and output" - - In other word, if you want to stack more RNNs on this layer, set to False - - In a dynamic model, `return_last_output` can be updated when it is called in customised forward(). - By default, `False`. - return_seq_2d : boolean - Only consider this argument when `return_last_output` is `False` - - - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. - - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. - - In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). - By default, `False`. - return_last_state: boolean - Whether to return the last state of the RNN cell. The state is a list of Tensor. - For simple RNN and GRU, last_state = [last_output]; For LSTM, last_state = [last_output, last_cell_state] - - - If True, the layer will return outputs and the final state of the cell. - - If False, the layer will return outputs only. - - In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). - By default, `False`. - in_channels: int - Optional, the number of channels of the previous layer which is normally the size of embedding. - If given, the layer will be built when init. - If None, it will be automatically detected when the layer is forwarded for the first time. - name : str - A unique layer name. - - Examples - -------- - For synced sequence input and output, see `PTB example `__ - - A simple regression model below. - - >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) - >>> rnn_out, lstm_state = tl.layers.RNN( - >>> cell=tf.keras.layers.LSTMCell(units=hidden_size, dropout=0.1), - >>> in_channels=embedding_size, - >>> return_last_output=True, return_last_state=True, name='lstmrnn' - >>> )(inputs) - >>> outputs = tl.layers.Dense(n_units=1)(rnn_out) - >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_state[0], rnn_state[1]], name='rnn_model') - >>> # If LSTMCell is applied, the rnn_state is [h, c] where h the hidden state and c the cell state of LSTM. - - A stacked RNN model. 
- - >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) - >>> rnn_out1 = tl.layers.RNN( - >>> cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), - >>> return_last_output=False, return_seq_2d=False, return_last_state=False - >>> )(inputs) - >>> rnn_out2 = tl.layers.RNN( - >>> cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), - >>> return_last_output=True, return_last_state=False - >>> )(rnn_out1) - >>> outputs = tl.layers.Dense(n_units=1)(rnn_out2) - >>> rnn_model = tl.models.Model(inputs=inputs, outputs=outputs) - - An example if the sequences have different length and contain padding. - Similar to the DynamicRNN in TL 1.x. - - If the `sequence_length` is provided in RNN's forwarding and both `return_last_output` and `return_last_state` - are set as `True`, the forward function will automatically ignore the paddings. Note that if `return_last_output` - is set as `False`, the synced sequence outputs will still include outputs which correspond with paddings, - but users are free to select which slice of outputs to be used in following procedure. - - The `sequence_length` should be a list of integers which indicates the length of each sequence. - It is recommended to - `tl.layers.retrieve_seq_length_op3 `__ - to calculate the `sequence_length`. - - >>> data = [[[1], [2], [0], [0], [0]], [[1], [2], [3], [0], [0]], [[1], [2], [6], [1], [1]]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> class DynamicRNNExample(tl.models.Model): - >>> def __init__(self): - >>> super(DynamicRNNExample, self).__init__() - >>> self.rnnlayer = tl.layers.RNN( - >>> cell=tf.keras.layers.SimpleRNNCell(units=6, dropout=0.1), in_channels=1, return_last_output=True, - >>> return_last_state=True - >>> ) - >>> def forward(self, x): - >>> z, s = self.rnnlayer(x, sequence_length=tl.layers.retrieve_seq_length_op3(x)) - >>> return z, s - >>> model = DynamicRNNExample() - >>> model.eval() - >>> output, state = model(data) - - - Notes - ----- - Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see layer :class:`Reshape`. 
- - """ - - def __init__( - self, - cell, - return_last_output=False, - return_seq_2d=False, - return_last_state=True, - in_channels=None, - name=None, # 'rnn' - ): - - super(RNN, self).__init__(name=name) - - self.cell = cell - self.return_last_output = return_last_output - self.return_seq_2d = return_seq_2d - self.return_last_state = return_last_state - - if in_channels is not None: - self.build((None, None, in_channels)) - self._built = True - - logging.info("RNN %s: cell: %s, n_units: %s" % (self.name, self.cell.__class__.__name__, self.cell.units)) - - def __repr__(self): - s = ('{classname}(cell={cellname}, n_units={n_units}') - s += ', name=\'{name}\'' - s += ')' - return s.format( - classname=self.__class__.__name__, cellname=self.cell.__class__.__name__, n_units=self.cell.units, - **self.__dict__ - ) - - def build(self, inputs_shape): - """ - Parameters - ---------- - inputs_shape : tuple - the shape of inputs tensor - """ - # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] - if len(inputs_shape) != 3: - raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]") - - with tf.name_scope(self.name) as scope: - self.cell.build(tuple(inputs_shape)) - - if self._trainable_weights is None: - self._trainable_weights = list() - for var in self.cell.trainable_variables: - self._trainable_weights.append(var) - - # @tf.function - def forward(self, inputs, sequence_length=None, initial_state=None, **kwargs): - """ - Parameters - ---------- - inputs : input tensor - The input of a network - sequence_length: None or list of integers - The actual length of each sequence in batch without padding. - If provided, when `return_last_output` and `return_last_state` are `True`, - the RNN will perform in the manner of a dynamic RNN, i.e. - the RNN will return the actual last output / state without padding. - initial_state : None or list of Tensor (RNN State) - If None, `initial_state` is zero state. - - **kwargs: dict - Some attributes can be updated during forwarding - such as `return_last_output`, `return_seq_2d`, `return_last_state`. - """ - if kwargs: - for attr in kwargs: - if attr in self.__dict__: - setattr(self, attr, kwargs[attr]) - - batch_size = inputs.get_shape().as_list()[0] - total_steps = inputs.get_shape().as_list()[1] - - # checking the type and values of sequence_length - if sequence_length is not None: - if isinstance(sequence_length, list): - pass - elif isinstance(sequence_length, tf.Tensor): - pass - elif isinstance(sequence_length, np.ndarray): - sequence_length = sequence_length.tolist() - else: - raise TypeError( - "The argument sequence_length should be either None or a list of integers. " - "Type got %s" % type(sequence_length) - ) - if (len(sequence_length) != batch_size): - raise ValueError( - "The argument sequence_length should contain %d " % batch_size + - "elements indicating the initial length of each sequence, but got only %d. " % len(sequence_length) - ) - for i in sequence_length: - if not (type(i) is int or (isinstance(i, tf.Tensor) and i.dtype.is_integer)): - raise TypeError( - "The argument sequence_length should be either None or a list of integers. " - "One element of sequence_length has the type %s" % type(i) - ) - if i > total_steps: - raise ValueError( - "The actual length of a sequence should not be longer than " - "that of the longest sequence (total steps) in this mini-batch. 
" - "Total steps of this mini-batch %d, " % total_steps + - "but got an actual length of a sequence %d" % i - ) - - sequence_length = [i - 1 if i >= 1 else 0 for i in sequence_length] - - # set warning - # if (not self.return_last_output) and sequence_length is not None: - # warnings.warn( - # 'return_last_output is set as %s ' % self.return_last_output + - # 'When sequence_length is provided, it is recommended to set as True. ' + - # 'Otherwise, padding will be considered while RNN is forwarding.' - # ) - - # return the last output, iterating each seq including padding ones. No need to store output during each - # time step. - if self.return_last_output and sequence_length is None: - outputs = [-1] - else: - outputs = list() - - # initialize the states if provided - states = initial_state if initial_state is not None else self.cell.get_initial_state(inputs) - if not isinstance(states, list): - states = [states] - - stored_states = list() - - # initialize the cell - self.cell.reset_dropout_mask() - self.cell.reset_recurrent_dropout_mask() - - # recurrent computation - # FIXME: if sequence_length is provided (dynamic rnn), only iterate max(sequence_length) times. - for time_step in range(total_steps): - - cell_output, states = self.cell.call(inputs[:, time_step, :], states, training=self.is_train) - stored_states.append(states) - - if self.return_last_output and sequence_length is None: - outputs[-1] = cell_output - else: - outputs.append(cell_output) - - # prepare to return results - if self.return_last_output and sequence_length is None: - outputs = outputs[-1] - - elif self.return_last_output and sequence_length is not None: - outputs = tf.convert_to_tensor(outputs) - outputs = tf.gather(outputs, sequence_length, axis=0) - - outputs_without_padding = [] - for i in range(batch_size): - outputs_without_padding.append(outputs[i][i][:]) - outputs = tf.convert_to_tensor(outputs_without_padding) - else: - if self.return_seq_2d: - # PTB tutorial: stack dense layer after that, or compute the cost from the output - # 2D Tensor [batch_size * n_steps, n_hidden] - outputs = tf.reshape(tf.concat(outputs, 1), [-1, self.cell.units]) - else: - # : stack more RNN layer after that - # 3D Tensor [batch_size, n_steps, n_hidden] - outputs = tf.reshape(tf.concat(outputs, 1), [-1, total_steps, self.cell.units]) - - if self.return_last_state and sequence_length is None: - return outputs, states - elif self.return_last_state and sequence_length is not None: - - stored_states = tf.convert_to_tensor(stored_states) - stored_states = tf.gather(stored_states, sequence_length, axis=0) - - states = [] - for i in range(stored_states.shape[1]): - states.append(tf.convert_to_tensor([stored_states[b, i, b, :] for b in range(batch_size)])) - - return outputs, states - else: - return outputs - - -class SimpleRNN(RNN): - """ - The :class:`SimpleRNN` class is a fixed length recurrent layer for implementing simple RNN. - - Parameters - ---------- - units: int - Positive integer, the dimension of hidden space. - return_last_output : boolean - Whether return last output or all outputs in a sequence. - - If True, return the last output, "Sequence input and single output" - - If False, return all outputs, "Synced sequence input and output" - - In other word, if you want to stack more RNNs on this layer, set to False - - In a dynamic model, `return_last_output` can be updated when it is called in customised forward(). - By default, `False`. 
- return_seq_2d : boolean - Only consider this argument when `return_last_output` is `False` - - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. - - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. - - In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). - By default, `False`. - return_last_state: boolean - Whether to return the last state of the RNN cell. The state is a list of Tensor. - For simple RNN, last_state = [last_output] - - - If True, the layer will return outputs and the final state of the cell. - - If False, the layer will return outputs only. - - In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). - By default, `False`. - in_channels: int - Optional, the number of channels of the previous layer which is normally the size of embedding. - If given, the layer will be built when init. - If None, it will be automatically detected when the layer is forwarded for the first time. - name : str - A unique layer name. - `**kwargs`: - Advanced arguments to configure the simple RNN cell. - Please check tf.keras.layers.SimpleRNNCell. - - Examples - -------- - - A simple regression model below. - - >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) - >>> rnn_out, lstm_state = tl.layers.SimpleRNN( - >>> units=hidden_size, dropout=0.1, # both units and dropout are used to configure the simple rnn cell. - >>> in_channels=embedding_size, - >>> return_last_output=True, return_last_state=True, name='simplernn' - >>> )(inputs) - >>> outputs = tl.layers.Dense(n_units=1)(rnn_out) - >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_state[0]], name='rnn_model') - - Notes - ----- - Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see layer :class:`Reshape`. - - """ - - def __init__( - self, - units, - return_last_output=False, - return_seq_2d=False, - return_last_state=True, - in_channels=None, - name=None, # 'simplernn' - **kwargs - ): - super(SimpleRNN, self).__init__( - cell=tf.keras.layers.SimpleRNNCell(units=units, **kwargs), return_last_output=return_last_output, - return_seq_2d=return_seq_2d, return_last_state=return_last_state, in_channels=in_channels, name=name - ) - - -class GRURNN(RNN): - """ - The :class:`GRURNN` class is a fixed length recurrent layer for implementing RNN with GRU cell. - - Parameters - ---------- - units: int - Positive integer, the dimension of hidden space. - return_last_output : boolean - Whether return last output or all outputs in a sequence. - - If True, return the last output, "Sequence input and single output" - - If False, return all outputs, "Synced sequence input and output" - - In other word, if you want to stack more RNNs on this layer, set to False - - In a dynamic model, `return_last_output` can be updated when it is called in customised forward(). - By default, `False`. - return_seq_2d : boolean - Only consider this argument when `return_last_output` is `False` - - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. - - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. - - In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). - By default, `False`. - return_last_state: boolean - Whether to return the last state of the RNN cell. The state is a list of Tensor. 
- For GRU, last_state = [last_output] - - - If True, the layer will return outputs and the final state of the cell. - - If False, the layer will return outputs only. - - In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). - By default, `False`. - in_channels: int - Optional, the number of channels of the previous layer which is normally the size of embedding. - If given, the layer will be built when init. - If None, it will be automatically detected when the layer is forwarded for the first time. - name : str - A unique layer name. - `**kwargs`: - Advanced arguments to configure the GRU cell. - Please check tf.keras.layers.GRUCell. - - Examples - -------- - - A simple regression model below. - - >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) - >>> rnn_out, lstm_state = tl.layers.GRURNN( - >>> units=hidden_size, dropout=0.1, # both units and dropout are used to configure the GRU cell. - >>> in_channels=embedding_size, - >>> return_last_output=True, return_last_state=True, name='grurnn' - >>> )(inputs) - >>> outputs = tl.layers.Dense(n_units=1)(rnn_out) - >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_state[0]], name='rnn_model') - - Notes - ----- - Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see layer :class:`Reshape`. - - """ - - def __init__( - self, - units, - return_last_output=False, - return_seq_2d=False, - return_last_state=True, - in_channels=None, - name=None, # 'grurnn' - **kwargs - ): - super(GRURNN, self).__init__( - cell=tf.keras.layers.GRUCell(units=units, **kwargs), return_last_output=return_last_output, - return_seq_2d=return_seq_2d, return_last_state=return_last_state, in_channels=in_channels, name=name - ) - - -class LSTMRNN(RNN): - """ - The :class:`LSTMRNN` class is a fixed length recurrent layer for implementing RNN with LSTM cell. - - Parameters - ---------- - units: int - Positive integer, the dimension of hidden space. - return_last_output : boolean - Whether return last output or all outputs in a sequence. - - If True, return the last output, "Sequence input and single output" - - If False, return all outputs, "Synced sequence input and output" - - In other word, if you want to stack more RNNs on this layer, set to False - - In a dynamic model, `return_last_output` can be updated when it is called in customised forward(). - By default, `False`. - return_seq_2d : boolean - Only consider this argument when `return_last_output` is `False` - - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. - - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. - - In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). - By default, `False`. - return_last_state: boolean - Whether to return the last state of the RNN cell. The state is a list of Tensor. - For LSTM, last_state = [last_output, last_cell_state] - - - If True, the layer will return outputs and the final state of the cell. - - If False, the layer will return outputs only. - - In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). - By default, `False`. - in_channels: int - Optional, the number of channels of the previous layer which is normally the size of embedding. - If given, the layer will be built when init. - If None, it will be automatically detected when the layer is forwarded for the first time. 
- name : str - A unique layer name. - `**kwargs`: - Advanced arguments to configure the LSTM cell. - Please check tf.keras.layers.LSTMCell. - - Examples - -------- - - A simple regression model below. - - >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) - >>> rnn_out, lstm_state = tl.layers.LSTMRNN( - >>> units=hidden_size, dropout=0.1, # both units and dropout are used to configure the LSTM cell. - >>> in_channels=embedding_size, - >>> return_last_output=True, return_last_state=True, name='grurnn' - >>> )(inputs) - >>> outputs = tl.layers.Dense(n_units=1)(rnn_out) - >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_state[0]], name='rnn_model') - - Notes - ----- - Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see layer :class:`Reshape`. - - """ - - def __init__( - self, - units, - return_last_output=False, - return_seq_2d=False, - return_last_state=True, - in_channels=None, - name=None, # 'lstmrnn' - **kwargs - ): - super(LSTMRNN, self).__init__( - cell=tf.keras.layers.LSTMCell(units=units, **kwargs), return_last_output=return_last_output, - return_seq_2d=return_seq_2d, return_last_state=return_last_state, in_channels=in_channels, name=name - ) - - -class BiRNN(Layer): - """ - The :class:`BiRNN` class is a fixed length Bidirectional recurrent layer. - - Parameters - ---------- - fw_cell : TensorFlow cell function for forward direction - A RNN cell implemented by tf.keras, e.g. tf.keras.layers.SimpleRNNCell, tf.keras.layers.LSTMCell, tf.keras.layers.GRUCell. - Note TF2.0+, TF1.0+ and TF1.0- are different - bw_cell: TensorFlow cell function for backward direction similar with `fw_cell` - return_seq_2d : boolean. - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. - In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). - By default, `False`. - return_last_state: boolean - Whether to return the last state of the two cells. The state is a list of Tensor. - - If True, the layer will return outputs, the final state of `fw_cell` and the final state of `bw_cell`. - - If False, the layer will return outputs only. - - In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). - By default, `False`. - in_channels: int - Optional, the number of channels of the previous layer which is normally the size of embedding. - If given, the layer will be built when init. - If None, it will be automatically detected when the layer is forwarded for the first time. - name : str - A unique layer name. - - Examples - -------- - A simple regression model below. - - >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) - >>> # the fw_cell and bw_cell can be different - >>> rnnlayer = tl.layers.BiRNN( - >>> fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), - >>> bw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size + 1, dropout=0.1), - >>> return_seq_2d=True, return_last_state=True - >>> ) - >>> # if return_last_state=True, the final state of the two cells will be returned together with the outputs - >>> # if return_last_state=False, only the outputs will be returned - >>> rnn_out, rnn_fw_state, rnn_bw_state = rnnlayer(inputs) - >>> # if the BiRNN is followed by a Dense, return_seq_2d should be True. - >>> # if the BiRNN is followed by other RNN, return_seq_2d can be False. 
- >>> dense = tl.layers.Dense(n_units=1)(rnn_out) - >>> outputs = tl.layers.Reshape([-1, num_steps])(dense) - >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_out, rnn_fw_state[0], rnn_bw_state[0]]) - - A stacked BiRNN model. - - >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) - >>> rnn_out1 = tl.layers.BiRNN( - >>> fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), - >>> bw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size + 1, dropout=0.1), - >>> return_seq_2d=False, return_last_state=False - >>> )(inputs) - >>> rnn_out2 = tl.layers.BiRNN( - >>> fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), - >>> bw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size + 1, dropout=0.1), - >>> return_seq_2d=True, return_last_state=False - >>> )(rnn_out1) - >>> dense = tl.layers.Dense(n_units=1)(rnn_out2) - >>> outputs = tl.layers.Reshape([-1, num_steps])(dense) - >>> rnn_model = tl.models.Model(inputs=inputs, outputs=outputs) - - Notes - ----- - Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see layer :class:`Reshape`. - - """ - - def __init__( - self, - fw_cell, - bw_cell, - return_seq_2d=False, - return_last_state=False, - in_channels=None, - name=None, # 'birnn' - ): - super(BiRNN, self).__init__(name) - - self.fw_cell = fw_cell - self.bw_cell = bw_cell - self.return_seq_2d = return_seq_2d - self.return_last_state = return_last_state - - if in_channels is not None: - self.build((None, None, in_channels)) - self._built = True - - logging.info( - "BiRNN %s: fw_cell: %s, fw_n_units: %s, bw_cell: %s, bw_n_units: %s" % ( - self.name, self.fw_cell.__class__.__name__, self.fw_cell.units, self.bw_cell.__class__.__name__, - self.bw_cell.units - ) - ) - - def __repr__(self): - s = ( - '{classname}(fw_cell={fw_cellname}, fw_n_units={fw_n_units}' - ', bw_cell={bw_cellname}, bw_n_units={bw_n_units}' - ) - s += ', name=\'{name}\'' - s += ')' - return s.format( - classname=self.__class__.__name__, fw_cellname=self.fw_cell.__class__.__name__, - fw_n_units=self.fw_cell.units, bw_cellname=self.bw_cell.__class__.__name__, bw_n_units=self.bw_cell.units, - **self.__dict__ - ) - - def build(self, inputs_shape): - """ - Parameters - ---------- - inputs_shape : tuple - the shape of inputs tensor - """ - # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] - if len(inputs_shape) != 3: - raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]") - - with tf.name_scope(self.name) as scope: - self.fw_cell.build(tuple(inputs_shape)) - self.bw_cell.build(tuple(inputs_shape)) - - if self._trainable_weights is None: - self._trainable_weights = list() - for var in self.fw_cell.trainable_variables: - self._trainable_weights.append(var) - for var in self.bw_cell.trainable_variables: - self._trainable_weights.append(var) - - # @tf.function - def forward(self, inputs, fw_initial_state=None, bw_initial_state=None, **kwargs): - """ - Parameters - ---------- - inputs : input tensor - The input of a network - fw_initial_state : None or list of Tensor (RNN State) - If None, `fw_initial_state` is zero state. - bw_initial_state : None or list of Tensor (RNN State) - If None, `bw_initial_state` is zero state. - **kwargs: dict - Some attributes can be updated during forwarding - such as `return_last_output`, `return_seq_2d`, `return_last_state`. 
- """ - - if kwargs: - for attr in kwargs: - if attr in self.__dict__: - setattr(self, attr, kwargs[attr]) - - fw_outputs = list() - bw_outputs = list() - - fw_states = fw_initial_state if fw_initial_state is not None else self.fw_cell.get_initial_state(inputs) - bw_states = bw_initial_state if bw_initial_state is not None else self.bw_cell.get_initial_state(inputs) - - if not isinstance(fw_states, list): - fw_states = [fw_states] - if not isinstance(bw_states, list): - bw_states = [bw_states] - - total_steps = inputs.get_shape().as_list()[1] - - self.fw_cell.reset_dropout_mask() - self.fw_cell.reset_recurrent_dropout_mask() - self.bw_cell.reset_dropout_mask() - self.bw_cell.reset_recurrent_dropout_mask() - - for time_step in range(total_steps): - - fw_cell_output, fw_states = self.fw_cell.call(inputs[:, time_step, :], fw_states, training=self.is_train) - bw_cell_output, bw_states = self.bw_cell.call( - inputs[:, -time_step - 1, :], bw_states, training=self.is_train - ) - - fw_outputs.append(fw_cell_output) - bw_outputs.append(bw_cell_output) - - if self.return_seq_2d: - # PTB tutorial: stack dense layer after that, or compute the cost from the output - # 2D Tensor [batch_size * n_steps, n_hidden] - fw_outputs = tf.reshape(tf.concat(fw_outputs, 1), [-1, self.fw_cell.units]) - bw_outputs = tf.reshape(tf.concat(bw_outputs, 1), [-1, self.bw_cell.units]) - else: - # : stack more RNN layer after that - # 3D Tensor [batch_size, n_steps, n_hidden] - fw_outputs = tf.reshape(tf.concat(fw_outputs, 1), [-1, total_steps, self.fw_cell.units]) - bw_outputs = tf.reshape(tf.concat(bw_outputs, 1), [-1, total_steps, self.bw_cell.units]) - - outputs = tf.concat([fw_outputs, bw_outputs], -1) - - if self.return_last_state: - return outputs, fw_states, bw_states - else: - return outputs - - -''' -class ConvRNNCell(object): - """Abstract object representing an Convolutional RNN Cell.""" - - def __call__(self, inputs, state, scope=None): - """Run this RNN cell on inputs, starting from the given state.""" - raise NotImplementedError("Abstract method") - - @property - def state_size(self): - """size(s) of state(s) used by this cell.""" - raise NotImplementedError("Abstract method") - - @property - def output_size(self): - """Integer or TensorShape: size of outputs produced by this cell.""" - raise NotImplementedError("Abstract method") - - def zero_state(self, batch_size): #, dtype=LayersConfig.tf_dtype): - """Return zero-filled state tensor(s). - Args: - batch_size: int, float, or unit Tensor representing the batch size. - Returns: - tensor of shape '[batch_size x shape[0] x shape[1] x num_features] - filled with zeros - - """ - dtype = LayersConfig.tf_dtype - shape = self.shape - num_features = self.num_features - # TODO : TypeError: 'NoneType' object is not subscriptable - zeros = tf.zeros([batch_size, shape[0], shape[1], num_features * 2], dtype=dtype) - return zeros - - -class BasicConvLSTMCell(ConvRNNCell): - """Basic Conv LSTM recurrent network cell. - - Parameters - ----------- - shape : tuple of int - The height and width of the cell. - filter_size : tuple of int - The height and width of the filter - num_features : int - The hidden size of the cell - forget_bias : float - The bias added to forget gates (see above). - input_size : int - Deprecated and unused. - state_is_tuple : boolen - If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`. - If False, they are concatenated along the column axis. The latter behavior will soon be deprecated. 
- act : activation function - The activation function of this layer, tanh as default. - - """ - - def __init__( - self, shape, filter_size, num_features, forget_bias=1.0, input_size=None, state_is_tuple=False, - act=tf.nn.tanh - ): - """Initialize the basic Conv LSTM cell.""" - # if not state_is_tuple: - # logging.warn("%s: Using a concatenated state is slower and will soon be " - # "deprecated. Use state_is_tuple=True.", self) - if input_size is not None: - logging.warn("%s: The input_size parameter is deprecated.", self) - self.shape = shape - self.filter_size = filter_size - self.num_features = num_features - self._forget_bias = forget_bias - self._state_is_tuple = state_is_tuple - self._activation = act - - @property - def state_size(self): - """State size of the LSTMStateTuple.""" - return (LSTMStateTuple(self._num_units, self._num_units) if self._state_is_tuple else 2 * self._num_units) - - @property - def output_size(self): - """Number of units in outputs.""" - return self._num_units - - def __call__(self, inputs, state, scope=None): - """Long short-term memory cell (LSTM).""" - with tf.compat.v1.variable_scope(scope or type(self).__name__): # "BasicLSTMCell" - # Parameters of gates are concatenated into one multiply for efficiency. - if self._state_is_tuple: - c, h = state - else: - # print state - # c, h = tf.split(3, 2, state) - c, h = tf.split(state, 2, 3) - concat = _conv_linear([inputs, h], self.filter_size, self.num_features * 4, True) - - # i = input_gate, j = new_input, f = forget_gate, o = output_gate - # i, j, f, o = tf.split(3, 4, concat) - i, j, f, o = tf.split(concat, 4, 3) - - new_c = (c * tf.nn.sigmoid(f + self._forget_bias) + tf.nn.sigmoid(i) * self._activation(j)) - new_h = self._activation(new_c) * tf.nn.sigmoid(o) - - if self._state_is_tuple: - new_state = LSTMStateTuple(new_c, new_h) - else: - new_state = tf.concat([new_c, new_h], 3) - return new_h, new_state - - -def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=None): - """convolution: - - Parameters - ---------- - args : tensor - 4D Tensor or a list of 4D, batch x n, Tensors. - filter_size : tuple of int - Filter height and width. - num_features : int - Nnumber of features. - bias_start : float - Starting value to initialize the bias; 0 by default. - scope : VariableScope - For the created subgraph; defaults to "Linear". - - Returns - -------- - - A 4D Tensor with shape [batch h w num_features] - - Raises - ------- - - ValueError : if some of the arguments has unspecified or wrong shape. - - """ - # Calculate the total size of arguments on dimension 1. - total_arg_size_depth = 0 - shapes = [a.get_shape().as_list() for a in args] - for shape in shapes: - if len(shape) != 4: - raise ValueError("Linear is expecting 4D arguments: %s" % str(shapes)) - if not shape[3]: - raise ValueError("Linear expects shape[4] of arguments: %s" % str(shapes)) - else: - total_arg_size_depth += shape[3] - - dtype = [a.dtype for a in args][0] - - # Now the computation. 
- with tf.compat.v1.variable_scope(scope or "Conv"): - matrix = tf.compat.v1.get_variable( - "Matrix", [filter_size[0], filter_size[1], total_arg_size_depth, num_features], dtype=dtype - ) - if len(args) == 1: - res = tf.nn.conv2d(args[0], matrix, strides=[1, 1, 1, 1], padding='SAME') - else: - res = tf.nn.conv2d(tf.concat(args, 3), matrix, strides=[1, 1, 1, 1], padding='SAME') - if not bias: - return res - bias_term = tf.compat.v1.get_variable( - "Bias", [num_features], dtype=dtype, - initializer=tf.compat.v1.initializers.constant(bias_start, dtype=dtype) - ) - return res + bias_term - - -class ConvLSTM(Layer): - """A fixed length Convolutional LSTM layer. - - See this `paper `__ . - - Parameters - ---------- - prev_layer : :class:`Layer` - Previous layer - cell_shape : tuple of int - The shape of each cell width * height - filter_size : tuple of int - The size of filter width * height - cell_fn : a convolutional RNN cell - Cell function like :class:`BasicConvLSTMCell` - feature_map : int - The number of feature map in the layer. - initializer : initializer - The initializer for initializing the parameters. - n_steps : int - The sequence length. - initial_state : None or ConvLSTM State - If None, `initial_state` is zero state. - return_last : boolean - Whether return last output or all outputs in each step. - - If True, return the last output, "Sequence input and single output". - - If False, return all outputs, "Synced sequence input and output". - - In other word, if you want to stack more RNNs on this layer, set to False. - - return_seq_2d : boolean - Only consider this argument when `return_last_output` is `False` - - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it. - - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it. - - name : str - A unique layer name. - - Attributes - ---------- - outputs : tensor - The output of this RNN. return_last_output = False, outputs = all cell_output, which is the hidden state. - cell_output.get_shape() = (?, h, w, c]) - - final_state : tensor or StateTuple - The finial state of this layer. - - When state_is_tuple = False, it is the final hidden and cell states, - - When state_is_tuple = True, You can get the final state after each iteration during training, then feed it to the initial state of next iteration. - - initial_state : tensor or StateTuple - It is the initial state of this ConvLSTM layer, you can use it to initialize - your state at the beginning of each epoch or iteration according to your - training procedure. - - batch_size : int or tensor - Is int, if able to compute the batch_size, otherwise, tensor for ``?``. - - """ - - @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release - def __init__( - self, - prev_layer, - cell_shape=None, - feature_map=1, - filter_size=(3, 3), - cell_fn=BasicConvLSTMCell, - initializer=tf.compat.v1.initializers.random_uniform(-0.1, 0.1), - n_steps=5, - initial_state=None, - return_last=False, - return_seq_2d=False, - name='convlstm', - ): - super(ConvLSTM, self).__init__(prev_layer=prev_layer, name=name) - - logging.info( - "ConvLSTM %s: feature_map: %d, n_steps: %d, " - "in_dim: %d %s, cell_fn: %s " % - (self.name, feature_map, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__) - ) - # You can get the dimension by .get_shape() or ._shape, and check the - # dimension by .with_rank() as follow. 
- # self.inputs.get_shape().with_rank(2) - # self.inputs.get_shape().with_rank(3) - - # Input dimension should be rank 5 [batch_size, n_steps(max), h, w, c] - try: - self.inputs.get_shape().with_rank(5) - except Exception: - raise Exception( - "RNN : Input dimension should be rank 5 : [batch_size, n_steps, input_x, " - "input_y, feature_map]" - ) - - fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] - - if fixed_batch_size.value: - batch_size = fixed_batch_size.value - logging.info(" RNN batch_size (concurrent processes): %d" % batch_size) - - else: - batch_size = array_ops.shape(self.inputs)[0] - logging.info(" non specified batch_size, uses a tensor instead.") - self.batch_size = batch_size - outputs = [] - self.cell = cell = cell_fn(shape=cell_shape, filter_size=filter_size, num_features=feature_map) - - if initial_state is None: - self.initial_state = cell.zero_state(batch_size, dtype=LayersConfig.tf_dtype) - else: - self.initial_state = initial_state - - state = self.initial_state - - # with tf.variable_scope("model", reuse=None, initializer=initializer): - with tf.compat.v1.variable_scope(name, initializer=initializer) as vs: - for time_step in range(n_steps): - if time_step > 0: tf.compat.v1.get_variable_scope().reuse_variables() - (cell_output, state) = cell(self.inputs[:, time_step, :, :, :], state) - outputs.append(cell_output) - - # Retrieve just the RNN variables. - # rnn_variables = [v for v in tf.all_variables() if v.name.startswith(vs.name)] - rnn_variables = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.VARIABLES, scope=vs.name) - - logging.info(" n_params : %d" % (len(rnn_variables))) - - if return_last: - # 2D Tensor [batch_size, n_hidden] - self.outputs = outputs[-1] - else: - if return_seq_2d: - # PTB tutorial: stack dense layer after that, or compute the cost from the output - # 4D Tensor [n_example, h, w, c] - self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, cell_shape[0] * cell_shape[1] * feature_map]) - else: - # : stack more RNN layer after that - # 5D Tensor [n_example/n_steps, n_steps, h, w, c] - self.outputs = tf.reshape( - tf.concat(outputs, 1), [-1, n_steps, cell_shape[0], cell_shape[1], feature_map] - ) - - self.final_state = state - - self._add_layers(self.outputs) - self._add_params(rnn_variables) - -''' - - -# @tf.function -def retrieve_seq_length_op(data): - """An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features], - it can be used when the features of padding (on right hand side) are all zeros. - - Parameters - ----------- - data : tensor - [batch_size, n_step(max), n_features] with zero padding on right hand side. - - Examples - ----------- - Single feature - - >>> data = [[[1],[2],[0],[0],[0]], - >>> [[1],[2],[3],[0],[0]], - >>> [[1],[2],[6],[1],[0]]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> length = tl.layers.retrieve_seq_length_op(data) - [2 3 4] - - Multiple features - - >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]], - >>> [[2,3],[2,4],[3,2],[0,0],[0,0]], - >>> [[3,3],[2,2],[5,3],[1,2],[0,0]]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> length = tl.layers.retrieve_seq_length_op(data) - [4 3 4] - - References - ------------ - Borrow from `TFlearn `__. 
- - """ - with tf.name_scope('GetLength'): - used = tf.sign(tf.reduce_max(input_tensor=tf.abs(data), axis=2)) - length = tf.reduce_sum(input_tensor=used, axis=1) - - return tf.cast(length, tf.int32) - - -# @tf.function -def retrieve_seq_length_op2(data): - """An op to compute the length of a sequence, from input shape of [batch_size, n_step(max)], - it can be used when the features of padding (on right hand side) are all zeros. - - Parameters - ----------- - data : tensor - [batch_size, n_step(max)] with zero padding on right hand side. - - Examples - ----------- - >>> data = [[1,2,0,0,0], - >>> [1,2,3,0,0], - >>> [1,2,6,1,0]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> length = tl.layers.retrieve_seq_length_op2(data) - tensor([2 3 4]) - - """ - return tf.reduce_sum(input_tensor=tf.cast(tf.greater(data, tf.zeros_like(data)), tf.int32), axis=1) - - -# @tf.function -def retrieve_seq_length_op3(data, pad_val=0): - """An op to compute the length of a sequence, the data shape can be [batch_size, n_step(max)] or - [batch_size, n_step(max), n_features]. - - If the data has type of tf.string and pad_val is assigned as empty string (''), this op will compute the - length of the string sequence. - - Parameters - ----------- - data : tensor - [batch_size, n_step(max)] or [batch_size, n_step(max), n_features] with zero padding on the right hand side. - pad_val: - By default 0. If the data is tf.string, please assign this as empty string ('') - - Examples - ----------- - >>> data = [[[1],[2],[0],[0],[0]], - >>> [[1],[2],[3],[0],[0]], - >>> [[1],[2],[6],[1],[0]]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> length = tl.layers.retrieve_seq_length_op3(data) - tensor([2, 3, 4]) - >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]], - >>> [[2,3],[2,4],[3,2],[0,0],[0,0]], - >>> [[3,3],[2,2],[5,3],[1,2],[0,0]]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> length = tl.layers.retrieve_seq_length_op3(data) - tensor([4, 3, 4]) - >>> data = [[1,2,0,0,0], - >>> [1,2,3,0,0], - >>> [1,2,6,1,0]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> length = tl.layers.retrieve_seq_length_op3(data) - tensor([2, 3, 4]) - >>> data = [['hello','world','','',''], - >>> ['hello','world','tensorlayer','',''], - >>> ['hello','world','tensorlayer','2.0','']] - >>> data = tf.convert_to_tensor(data, dtype=tf.string) - >>> length = tl.layers.retrieve_seq_length_op3(data, pad_val='') - tensor([2, 3, 4]) - - """ - data_shape_size = data.get_shape().ndims - if data_shape_size == 3: - return tf.reduce_sum( - input_tensor=tf.cast(tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2), dtype=tf.int32), - axis=1 - ) - elif data_shape_size == 2: - return tf.reduce_sum(input_tensor=tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), axis=1) - elif data_shape_size == 1: - raise ValueError("retrieve_seq_length_op3: data has wrong shape! Shape got ", data.get_shape().as_list()) - else: - raise ValueError( - "retrieve_seq_length_op3: handling data with num of dims %s hasn't been implemented!" % (data_shape_size) - ) - - -def target_mask_op(data, pad_val=0): - """ Return the mask of the input sequence data based on the padding values. - - Parameters - ----------- - data : tf.Tensor - A tensor with 2 or 3 dimensions. - pad_val: int, float, string, etc - The value that represent padding. By default, 0. For tf.string, you may use empty string. 
- - Examples - ----------- - >>> data = [['hello', 'world', '', '', ''], - >>> ['hello', 'world', 'tensorlayer', '', ''], - >>> ['hello', 'world', 'tensorlayer', '2.0', '']] - >>> data = tf.convert_to_tensor(data, dtype=tf.string) - >>> mask = tl.layers.target_mask_op(data, pad_val='') - >>> print(mask) - tf.Tensor( - [[1 1 0 0 0] - [1 1 1 0 0] - [1 1 1 1 0]], shape=(3, 5), dtype=int32) - >>> data = [[[1], [0], [0], [0], [0]], - >>> [[1], [2], [3], [0], [0]], - >>> [[1], [2], [0], [1], [0]]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> mask = tl.layers.target_mask_op(data) - >>> print(mask) - tf.Tensor( - [[1 0 0 0 0] - [1 1 1 0 0] - [1 1 0 1 0]], shape=(3, 5), dtype=int32) - >>> data = [[[0,0],[2,2],[1,2],[1,2],[0,0]], - >>> [[2,3],[2,4],[3,2],[1,0],[0,0]], - >>> [[3,3],[0,1],[5,3],[1,2],[0,0]]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> mask = tl.layers.target_mask_op(data) - >>> print(mask) - tf.Tensor( - [[0 1 1 1 0] - [1 1 1 1 0] - [1 1 1 1 0]], shape=(3, 5), dtype=int32) - """ - - if not isinstance(data, tf.Tensor): - raise AttributeError("target_mask_op: the type of input data should be tf.Tensor but got %s." % type(data)) - data_shape_size = data.get_shape().ndims - if data_shape_size == 3: - return tf.cast(tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2), dtype=tf.int32) - elif data_shape_size == 2: - return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32) - elif data_shape_size == 1: - raise ValueError( - "target_mask_op: data_shape %s is not supported. " - "The shape of data should have 2 or 3 dims." % (data.get_shape()) - ) - else: - raise ValueError( - "target_mask_op: handling data_shape %s hasn't been implemented! " - "The shape of data should have 2 or 3 dims" % (data.get_shape()) - ) diff --git a/tensorlayer/layers/scale.py b/tensorlayer/layers/scale.py index 3e14e462a..b86dcaee3 100644 --- a/tensorlayer/layers/scale.py +++ b/tensorlayer/layers/scale.py @@ -1,18 +1,16 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -import tensorflow as tf - +import tensorlayer as tl from tensorlayer import logging -from tensorlayer.initializers import constant -from tensorlayer.layers.core import Layer +from tensorlayer.layers.core import Module __all__ = [ 'Scale', ] -class Scale(Layer): +class Scale(Module): """The :class:`Scale` class is to multiple a trainable scale value to the layer outputs. Usually be used on the output of binary net. Parameters @@ -25,10 +23,8 @@ class Scale(Layer): Examples ---------- >>> inputs = tl.layers.Input([8, 3]) - >>> dense = tl.layers.Dense(n_units=10)(inputs) + >>> dense = tl.layers.Dense(n_units=10, in_channels=3)(inputs) >>> outputs = tl.layers.Scale(init_scale=0.5)(dense) - >>> model = tl.models.Model(inputs=inputs, outputs=[dense, outputs]) - >>> dense_out, scale_out = model(data, is_train=True) """ @@ -53,7 +49,7 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape): - self.scale = self._get_weights("scale", shape=[1], init=constant(value=self.init_scale)) + self.scale = self._get_weights("scale", shape=[1], init=tl.initializers.constant(value=self.init_scale)) # @tf.function def forward(self, inputs): diff --git a/tensorlayer/layers/shape.py b/tensorlayer/layers/shape.py index f8e7b47db..477847dc4 100644 --- a/tensorlayer/layers/shape.py +++ b/tensorlayer/layers/shape.py @@ -1,12 +1,9 @@ #! 
/usr/bin/python
 # -*- coding: utf-8 -*-

-import tensorflow as tf
-
 from tensorlayer import logging
-from tensorlayer.decorators import deprecated_alias
-from tensorlayer.layers.core import Layer
-from tensorlayer.layers.utils import flatten_reshape
+from tensorlayer.layers.core import Module
+import tensorlayer as tl

 __all__ = [
     'Flatten',
@@ -16,7 +13,7 @@
 ]


-class Flatten(Layer):
+class Flatten(Module):
     """A layer that reshapes high-dimension input into a vector.

     Then we often apply Dense, RNN, Concat and etc on the top of a flatten layer.
@@ -50,15 +47,15 @@ def __repr__(self):
         return s.format(classname=self.__class__.__name__, **self.__dict__)

     def build(self, inputs_shape=None):
-        pass
+        self.flatten_reshape = tl.ops.FlattenReshape()

     # @tf.function
     def forward(self, inputs):
-        outputs = flatten_reshape(inputs, name=self.name)
+        outputs = self.flatten_reshape(inputs)
         return outputs


-class Reshape(Layer):
+class Reshape(Module):
     """A layer that reshapes a given tensor.

     Parameters
@@ -93,15 +90,14 @@ def __repr__(self):
         return s.format(classname=self.__class__.__name__, **self.__dict__)

     def build(self, inputs_shape=None):
-        pass
+        self.reshape = tl.ops.Reshape(self.shape)

-    # @tf.function
     def forward(self, inputs):
-        outputs = tf.reshape(inputs, shape=self.shape, name=self.name)
+        outputs = self.reshape(inputs)
         return outputs


-class Transpose(Layer):
+class Transpose(Module):
     """A layer that transposes the dimension of a tensor.

     See `tf.transpose() `__ .
@@ -144,15 +140,15 @@ def __repr__(self):
         return s.format(classname=self.__class__.__name__, **self.__dict__)

     def build(self, inputs_shape=None):
-        pass
+        self.transpose = tl.ops.Transpose(perm=self.perm, conjugate=self.conjugate)

     # @tf.function
     def forward(self, inputs):
-        outputs = tf.transpose(a=inputs, perm=self.perm, conjugate=self.conjugate, name=self.name)
+        outputs = self.transpose(a=inputs)
         return outputs


-class Shuffle(Layer):
+class Shuffle(Module):
     """A layer that shuffle a 2D image [batch, height, width, channel], see `here `__.

     Parameters
@@ -170,9 +166,10 @@ class Shuffle(Layer):

     """

-    def __init__(self, group, name=None):  #'reshape'):
+    def __init__(self, group, in_channels=None, name=None):  #'reshape'):
         super(Shuffle, self).__init__(name)
         self.group = group
+        self.in_channels = in_channels

         logging.info("Shuffle %s" % (self.name))

@@ -187,18 +184,36 @@ def __repr__(self):
         return s.format(classname=self.__class__.__name__, **self.__dict__)

     def build(self, inputs_shape=None):
-        pass
+        self.transpose = tl.ops.Transpose([0, 1, 2, 4, 3])
+        inputs_shape = self.in_channels
+        if tl.BACKEND == 'mindspore' and inputs_shape is None:
+            raise ValueError("Did you forget to pass the keyword argument 'in_channels'?")
+        if tl.BACKEND == 'mindspore':
+            h, w, in_channel = inputs_shape[1:]
+            if in_channel % self.group != 0:
+                raise ValueError(
+                    "The in_channel must be a multiple of the number of groups. The in_channel got %d and the number of groups is %d."
+                    % (in_channel, self.group)
+                )
+            self.reshape1 = tl.ops.Reshape([-1, h, w, in_channel // self.group, self.group])
+            self.reshape2 = tl.ops.Reshape([-1, h, w, in_channel])

-    # @tf.function
     def forward(self, inputs):
-        in_shape = inputs.get_shape().as_list()
-        h, w, in_channel = in_shape[1:]
-        if in_channel % self.group != 0:
-            raise ValueError(
-                "The in_channel must be a multiple of the number of groups. The in_channel got %d and the number of groups is %d."
- % (in_channel, self.group) - ) - temp = tf.reshape(inputs, [-1, h, w, in_channel // self.group, self.group]) - temp = tf.transpose(temp, [0, 1, 2, 4, 3]) - outputs = tf.reshape(temp, [-1, h, w, in_channel], name=self.name) + if tl.BACKEND == 'tensorflow': + in_shape = tl.get_tensor_shape(inputs) + h, w, in_channel = in_shape[1:] + # if in_channel % self.group != 0: + # raise ValueError( + # "The in_channel must be a multiple of the number of groups. The in_channel got %d and the number of groups is %d." + # % (in_channel, self.group) + # ) + reshape1 = tl.ops.Reshape([-1, h, w, in_channel // self.group, self.group]) + temp = reshape1(inputs) + temp = self.transpose(temp) + reshape2 = tl.ops.Reshape([-1, h, w, in_channel]) + outputs = reshape2(temp) + else: + temp = self.reshape1(inputs) + temp = self.transpose(temp) + outputs = self.reshape2(temp) return outputs diff --git a/tensorlayer/layers/spatial_transformer.py b/tensorlayer/layers/spatial_transformer.py index 74822d565..23c94eb75 100644 --- a/tensorlayer/layers/spatial_transformer.py +++ b/tensorlayer/layers/spatial_transformer.py @@ -2,18 +2,10 @@ # -*- coding: utf-8 -*- import numpy as np -import tensorflow as tf from six.moves import xrange -from tensorflow.python.ops import array_ops - import tensorlayer as tl from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer -from tensorlayer.layers.utils import flatten_reshape - -# from tensorlayer.layers.core import LayersConfig -# from tensorlayer.layers.core import TF_GRAPHKEYS_VARIABLES +from tensorlayer.layers.core import Module __all__ = [ 'transformer', @@ -61,47 +53,43 @@ def transformer(U, theta, out_size, name='SpatialTransformer2dAffine'): """ def _repeat(x, n_repeats): - rep = tf.transpose(a=tf.expand_dims(tf.ones(shape=tf.stack([ + rep = tl.transpose(a=tl.expand_dims(tl.ones(shape=tl.stack([ n_repeats, - ])), 1), perm=[1, 0]) - rep = tf.cast(rep, 'int32') - x = tf.matmul(tf.reshape(x, (-1, 1)), rep) - return tf.reshape(x, [-1]) + ])), axis=1), perm=[1, 0]) + rep = tl.cast(rep, 'int32') + x = tl.matmul(tl.reshape(x, (-1, 1)), rep) + return tl.reshape(x, [-1]) def _interpolate(im, x, y, out_size): # constants - num_batch = tf.shape(input=im)[0] - height = tf.shape(input=im)[1] - width = tf.shape(input=im)[2] - channels = tf.shape(input=im)[3] - - x = tf.cast(x, 'float32') - y = tf.cast(y, 'float32') - height_f = tf.cast(height, 'float32') - width_f = tf.cast(width, 'float32') + num_batch, height, width, channels = tl.get_tensor_shape(im) + x = tl.cast(x, 'float32') + y = tl.cast(y, 'float32') + height_f = tl.cast(height, 'float32') + width_f = tl.cast(width, 'float32') out_height = out_size[0] out_width = out_size[1] - zero = tf.zeros([], dtype='int32') - max_y = tf.cast(tf.shape(input=im)[1] - 1, 'int32') - max_x = tf.cast(tf.shape(input=im)[2] - 1, 'int32') + zero = tl.zeros([], dtype='int32') + max_y = tl.cast(height - 1, 'int32') + max_x = tl.cast(width - 1, 'int32') # scale indices from [-1, 1] to [0, width/height] x = (x + 1.0) * (width_f) / 2.0 y = (y + 1.0) * (height_f) / 2.0 # do sampling - x0 = tf.cast(tf.floor(x), 'int32') + x0 = tl.cast(tl.floor(x), 'int32') x1 = x0 + 1 - y0 = tf.cast(tf.floor(y), 'int32') + y0 = tl.cast(tl.floor(y), 'int32') y1 = y0 + 1 - x0 = tf.clip_by_value(x0, zero, max_x) - x1 = tf.clip_by_value(x1, zero, max_x) - y0 = tf.clip_by_value(y0, zero, max_y) - y1 = tf.clip_by_value(y1, zero, max_y) + x0 = tl.clip_by_value(x0, zero, max_x) + x1 = tl.clip_by_value(x1, zero, 
max_x) + y0 = tl.clip_by_value(y0, zero, max_y) + y1 = tl.clip_by_value(y1, zero, max_y) dim2 = width dim1 = width * height - base = _repeat(tf.range(num_batch) * dim1, out_height * out_width) + base = _repeat(tl.range(num_batch) * dim1, out_height * out_width) base_y0 = base + y0 * dim2 base_y1 = base + y1 * dim2 idx_a = base_y0 + x0 @@ -111,23 +99,23 @@ def _interpolate(im, x, y, out_size): # use indices to lookup pixels in the flat image and restore # channels dim - im_flat = tf.reshape(im, tf.stack([-1, channels])) - im_flat = tf.cast(im_flat, 'float32') - Ia = tf.gather(im_flat, idx_a) - Ib = tf.gather(im_flat, idx_b) - Ic = tf.gather(im_flat, idx_c) - Id = tf.gather(im_flat, idx_d) + im_flat = tl.reshape(im, tl.stack([-1, channels])) + im_flat = tl.cast(im_flat, 'float32') + Ia = tl.gather(im_flat, idx_a) + Ib = tl.gather(im_flat, idx_b) + Ic = tl.gather(im_flat, idx_c) + Id = tl.gather(im_flat, idx_d) # and finally calculate interpolated values - x0_f = tf.cast(x0, 'float32') - x1_f = tf.cast(x1, 'float32') - y0_f = tf.cast(y0, 'float32') - y1_f = tf.cast(y1, 'float32') - wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1) - wb = tf.expand_dims(((x1_f - x) * (y - y0_f)), 1) - wc = tf.expand_dims(((x - x0_f) * (y1_f - y)), 1) - wd = tf.expand_dims(((x - x0_f) * (y - y0_f)), 1) - output = tf.add_n([wa * Ia, wb * Ib, wc * Ic, wd * Id]) + x0_f = tl.cast(x0, 'float32') + x1_f = tl.cast(x1, 'float32') + y0_f = tl.cast(y0, 'float32') + y1_f = tl.cast(y1, 'float32') + wa = tl.expand_dims(((x1_f - x) * (y1_f - y)), 1) + wb = tl.expand_dims(((x1_f - x) * (y - y0_f)), 1) + wc = tl.expand_dims(((x - x0_f) * (y1_f - y)), 1) + wd = tl.expand_dims(((x - x0_f) * (y - y0_f)), 1) + output = tl.add_n([wa * Ia, wb * Ib, wc * Ic, wd * Id]) return output def _meshgrid(height, width): @@ -136,44 +124,43 @@ def _meshgrid(height, width): # np.linspace(-1, 1, height)) # ones = np.ones(np.prod(x_t.shape)) # grid = np.vstack([x_t.flatten(), y_t.flatten(), ones]) - x_t = tf.matmul( - tf.ones(shape=tf.stack([height, 1])), - tf.transpose(a=tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), perm=[1, 0]) + x_t = tl.matmul( + tl.ones(shape=tl.stack([height, 1])), + tl.transpose(a=tl.expand_dims(tl.linspace(-1.0, 1.0, width), 1), perm=[1, 0]) ) - y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1), tf.ones(shape=tf.stack([1, width]))) + y_t = tl.matmul(tl.expand_dims(tl.linspace(-1.0, 1.0, height), 1), tl.ones(shape=tl.stack([1, width]))) - x_t_flat = tf.reshape(x_t, (1, -1)) - y_t_flat = tf.reshape(y_t, (1, -1)) + x_t_flat = tl.reshape(x_t, (1, -1)) + y_t_flat = tl.reshape(y_t, (1, -1)) - ones = tf.ones_like(x_t_flat) - grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones]) + ones = tl.ones(shape=tl.get_tensor_shape(x_t_flat)) + grid = tl.concat(axis=0, values=[x_t_flat, y_t_flat, ones]) return grid def _transform(theta, input_dim, out_size): - num_batch = tf.shape(input=input_dim)[0] - num_channels = tf.shape(input=input_dim)[3] - theta = tf.reshape(theta, (-1, 2, 3)) - theta = tf.cast(theta, 'float32') + num_batch, _, _, num_channels = tl.get_tensor_shape(input_dim) + theta = tl.reshape(theta, (-1, 2, 3)) + theta = tl.cast(theta, 'float32') # grid of (x_t, y_t, 1), eq (1) in ref [1] out_height = out_size[0] out_width = out_size[1] grid = _meshgrid(out_height, out_width) - grid = tf.expand_dims(grid, 0) - grid = tf.reshape(grid, [-1]) - grid = tf.tile(grid, tf.stack([num_batch])) - grid = tf.reshape(grid, tf.stack([num_batch, 3, -1])) + grid = tl.expand_dims(grid, 0) + grid = tl.reshape(grid, [-1]) 
+ grid = tl.tile(grid, tl.stack([num_batch])) + grid = tl.reshape(grid, tl.stack([num_batch, 3, -1])) # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s) - T_g = tf.matmul(theta, grid) - x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1]) - y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1]) - x_s_flat = tf.reshape(x_s, [-1]) - y_s_flat = tf.reshape(y_s, [-1]) + T_g = tl.matmul(theta, grid) + x_s = tl.slice(T_g, [0, 0, 0], [-1, 1, -1]) + y_s = tl.slice(T_g, [0, 1, 0], [-1, 1, -1]) + x_s_flat = tl.reshape(x_s, [-1]) + y_s_flat = tl.reshape(y_s, [-1]) input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, out_size) - output = tf.reshape(input_transformed, tf.stack([num_batch, out_height, out_width, num_channels])) + output = tl.reshape(input_transformed, tl.stack([num_batch, out_height, out_width, num_channels])) return output output = _transform(theta, U, out_size) @@ -200,14 +187,14 @@ def batch_transformer(U, thetas, out_size, name='BatchSpatialTransformer2dAffine Tensor of size [batch * num_transforms, out_height, out_width, num_channels] """ - with tf.compat.v1.variable_scope(name): - num_batch, num_transforms = map(int, thetas.get_shape().as_list()[:2]) - indices = [[i] * num_transforms for i in xrange(num_batch)] - input_repeated = tf.gather(U, tf.reshape(indices, [-1])) - return transformer(input_repeated, thetas, out_size) + # with tf.compat.v1.variable_scope(name): + num_batch, num_transforms = map(int, thetas.get_shape().as_list()[:2]) + indices = [[i] * num_transforms for i in xrange(num_batch)] + input_repeated = tl.gather(U, tl.reshape(indices, [-1])) + return transformer(input_repeated, thetas, out_size) -class SpatialTransformer2dAffine(Layer): +class SpatialTransformer2dAffine(Module): """The :class:`SpatialTransformer2dAffine` class is a 2D `Spatial Transformer Layer `__ for `2D Affine Transformation `__. @@ -280,14 +267,14 @@ def forward(self, inputs): n_channels is identical to that of U. """ theta_input, U = inputs - theta = tf.nn.tanh(tf.matmul(theta_input, self.W) + self.b) + theta = tl.tanh(tl.matmul(theta_input, self.W) + self.b) outputs = transformer(U, theta, out_size=self.out_size) # automatically set batch_size and channels # e.g. [?, 40, 40, ?] --> [64, 40, 40, 1] or [64, 20, 20, 4] batch_size = theta_input.shape[0] n_channels = U.shape[-1] if self.data_format == 'channel_last': - outputs = tf.reshape(outputs, shape=[batch_size, self.out_size[0], self.out_size[1], n_channels]) + outputs = tl.reshape(outputs, shape=[batch_size, self.out_size[0], self.out_size[1], n_channels]) else: raise Exception("unimplement data_format {}".format(self.data_format)) return outputs diff --git a/tensorlayer/layers/stack.py b/tensorlayer/layers/stack.py index 4e37d1f9a..570f805dc 100644 --- a/tensorlayer/layers/stack.py +++ b/tensorlayer/layers/stack.py @@ -1,11 +1,9 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -import tensorflow as tf - +import tensorlayer as tl from tensorlayer import logging -from tensorlayer.decorators import deprecated_alias -from tensorlayer.layers.core import Layer +from tensorlayer.layers.core import Module __all__ = [ 'Stack', @@ -13,7 +11,7 @@ ] -class Stack(Layer): +class Stack(Module): """ The :class:`Stack` class is a layer for stacking a list of rank-R tensors into one rank-(R+1) tensor, see `tf.stack() `__. 
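The hunks below complete the same conversion applied throughout this patch: build() instantiates a backend-neutral op object from tl.ops once, and forward() only applies it, so the layer runs unchanged on the TensorFlow, MindSpore, or Dragon backend. A minimal usage sketch of the converted Stack/UnStack layers (the input shapes and the call pattern are illustrative assumptions, not part of this patch):

    import tensorlayer as tl

    # stack two rank-2 tensors into one rank-3 tensor along axis 1
    ni = tl.layers.Input([4, 10])
    nj = tl.layers.Input([4, 10])
    out = tl.layers.Stack(axis=1)([ni, nj])          # build() creates tl.ops.Stack(axis=1); out shape: [4, 2, 10]
    t1, t2 = tl.layers.UnStack(num=2, axis=1)(out)   # build() creates tl.ops.Unstack(num=2, axis=1); each [4, 10]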
@@ -57,14 +55,14 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape): - pass + self.stack = tl.ops.Stack(axis=self.axis) def forward(self, inputs): - outputs = tf.stack(inputs, axis=self.axis, name=self.name) + outputs = self.stack(inputs) return outputs -class UnStack(Layer): +class UnStack(Module): """ The :class:`UnStack` class is a layer for unstacking the given dimension of a rank-R tensor into rank-(R-1) tensors., see `tf.unstack() `__. @@ -109,8 +107,8 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape): - pass + self.unstack = tl.ops.Unstack(num=self.num, axis=self.axis) def forward(self, inputs): - outputs = tf.unstack(inputs, num=self.num, axis=self.axis, name=self.name) + outputs = self.unstack(inputs) return outputs diff --git a/tensorlayer/layers/utils.py b/tensorlayer/layers/utils.py index e5dd154b1..2101a008c 100644 --- a/tensorlayer/layers/utils.py +++ b/tensorlayer/layers/utils.py @@ -1,13 +1,13 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -import numpy as np import tensorflow as tf from tensorflow.python.ops.rnn_cell import LSTMStateTuple import tensorlayer as tl from tensorlayer import logging from tensorlayer.decorators import deprecated, deprecated_alias +from tensorlayer.backend.ops.load_backend import BACKEND __all__ = [ 'cabs', @@ -74,9 +74,9 @@ def flatten_reshape(variable, name='flatten'): """ dim = 1 - for d in variable.get_shape()[1:].as_list(): + for d in tl.get_tensor_shape(variable)[1:]: # variable.get_shape()[1:].as_list(): dim *= d - return tf.reshape(variable, shape=[-1, dim], name=name) + return tl.reshape(variable, shape=[-1, dim]) def get_collection_trainable(name=''): @@ -129,22 +129,23 @@ def get_layers_with_name(net, name="", verbose=False): return layers -def get_variable_with_initializer(scope_name, var_name, shape, init=tl.initializers.random_normal()): +def get_variable_with_initializer(scope_name, var_name, shape, init=tl.initializers.random_normal(), trainable=True): # FIXME: documentation needed - # if tf.executing_eagerly(): var_name = scope_name + "/" + var_name - # if init_args is not None and len(init_args) != 0: - # initial_value = init(**init_args)(shape=shape) - # else: - # initial_value = init()(shape=shape) - # var = tf.Variable(initial_value=initial_value, name=var_name) # FIXME: not sure whether this is correct? + # TODO mindspore weights shape : [out_channel, in_channel, kernel_h, kernel_w] + if BACKEND == 'mindspore': + if len(shape) == 2: + pass + else: + shape = shape[::-1] + initial_value = init(shape=shape) - var = tf.Variable(initial_value=initial_value, name=var_name) #, **init_args) - # else: - # with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE): - # var = tf.get_variable(name=var_name, initializer=tf.zeros(shape), trainable=train) + if BACKEND == 'dragon': + return initial_value + + var = tl.Variable(initial_value=initial_value, name=var_name, trainable=trainable) return var diff --git a/tensorlayer/logging/__init__.py b/tensorlayer/logging/__init__.py index 274eef069..e3c0dac3d 100644 --- a/tensorlayer/logging/__init__.py +++ b/tensorlayer/logging/__init__.py @@ -5,7 +5,7 @@ various benchmarks and domain-specific problems. In addition, we also support transparent access to native TensorFlow parameters. For example, we provide not only layers for local response normalization, but also -layers that allow user to apply ``tf.nn.lrn`` on ``network.outputs``. 
+layers that allow user to apply ``tf.ops.lrn`` on ``network.outputs``. More functions can be found in `TensorFlow API `__. """ diff --git a/tensorlayer/logging/contrib/__init__.py b/tensorlayer/logging/contrib/__init__.py index dfb2f18f6..69a3ccb47 100644 --- a/tensorlayer/logging/contrib/__init__.py +++ b/tensorlayer/logging/contrib/__init__.py @@ -5,7 +5,7 @@ various benchmarks and domain-specific problems. In addition, we also support transparent access to native TensorFlow parameters. For example, we provide not only layers for local response normalization, but also -layers that allow user to apply ``tf.nn.lrn`` on ``network.outputs``. +layers that allow user to apply ``tf.ops.lrn`` on ``network.outputs``. More functions can be found in `TensorFlow API `__. """ diff --git a/tensorlayer/models/__init__.py b/tensorlayer/models/__init__.py index 7e54c8a4b..2ebf9f076 100644 --- a/tensorlayer/models/__init__.py +++ b/tensorlayer/models/__init__.py @@ -4,9 +4,9 @@ # """A collections of pre-defined well known models.""" from .core import * -from .mobilenetv1 import MobileNetV1 -from .resnet import ResNet50 -from .seq2seq import Seq2seq -from .seq2seq_with_attention import Seq2seqLuongAttention -from .squeezenetv1 import SqueezeNetV1 -from .vgg import * +# from .resnet import ResNet50 +# from .mobilenetv1 import MobileNetV1 +# from .squeezenetv1 import SqueezeNetV1 +# from .vgg import * +# from .seq2seq import Seq2seq +# from .seq2seq_with_attention import Seq2seqLuongAttention diff --git a/tensorlayer/models/core.py b/tensorlayer/models/core.py index 514db708f..2e4c640c5 100644 --- a/tensorlayer/models/core.py +++ b/tensorlayer/models/core.py @@ -1,852 +1,142 @@ -import os -from abc import abstractmethod -from queue import Queue - -import tensorflow as tf -from tensorflow.python.framework import ops as tf_ops +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- -import tensorlayer as tl -from tensorlayer import logging +from collections.abc import Iterable from tensorlayer.files import utils -from tensorlayer.layers import Layer, ModelLayer - -__all__ = [ - 'Model', -] - -_global_model_name_dict = {} # TODO: better implementation? -_global_model_name_set = set() - - -class Model(object): - """The :class:`Model` class represents a neural network. - - It should be subclassed when implementing a dynamic model, - where 'forward' method must be overwritten. - Otherwise, please specify 'inputs' tensor(s) and 'outputs' tensor(s) - to create a static model. In that case, 'inputs' tensors should come - from tl.layers.Input(). - - Parameters - ----------- - inputs : a Layer or list of Layer - The input(s) to the model. - outputs : a Layer or list of Layer - The output(s) to the model. - name : None or str - The name of the model. - - Methods - --------- - __init__(self, inputs=None, outputs=None, name=None) - Initializing the Model. - inputs() - Get input tensors to this network (only avaiable for static model). - outputs() - Get output tensors to this network (only avaiable for static model). - __call__(inputs, is_train=None, **kwargs) - Forward input tensors through this network. - all_layers() - Get all layer objects of this network in a list of layers. - weights() - Get the weights of this network in a list of tensors. - train() - Set this network in training mode. (affect layers e.g. Dropout, BatchNorm). - eval() - Set this network in evaluation mode. - as_layer() - Set this network as a ModelLayer so that it can be integrated into another Model. 
- release_memory() - Release the memory that was taken up by tensors which are maintained by this network. - save_weights(self, filepath, format='hdf5') - Save the weights of this network in a given format. - load_weights(self, filepath, format=None, in_order=True, skip=False) - Load weights into this network from a specified file. - save(self, filepath, save_weights=True) - Save the network with/without weights. - load(filepath, save_weights=True) - Load the network with/without weights. - - Examples - --------- - >>> import tensorflow as tf - >>> import numpy as np - >>> from tensorlayer.layers import Input, Dense, Dropout - >>> from tensorlayer.models import Model - - Define static model - - >>> class CustomModel(Model): - >>> def __init__(self): - >>> super(CustomModel, self).__init__() - >>> self.dense1 = Dense(n_units=800, act=tf.nn.relu, in_channels=784) - >>> self.dropout1 = Dropout(keep=0.8) - >>> self.dense2 = Dense(n_units=10, in_channels=800) - >>> def forward(self, x): - >>> z = self.dense1(x) - >>> z = self.dropout1(z) - >>> z = self.dense2(z) - >>> return z - >>> M_dynamic = CustomModel() - - Define static model - - >>> ni = Input([None, 784]) - >>> nn = Dense(n_units=800, act=tf.nn.relu)(ni) - >>> nn = Dropout(keep=0.8)(nn) - >>> nn = Dense(n_units=10, act=tf.nn.relu)(nn) - >>> M_static = Model(inputs=ni, outputs=nn, name="mlp") - - Get network information - - >>> print(M_static) - ... Model( - ... (_inputlayer): Input(shape=[None, 784], name='_inputlayer') - ... (dense): Dense(n_units=800, relu, in_channels='784', name='dense') - ... (dropout): Dropout(keep=0.8, name='dropout') - ... (dense_1): Dense(n_units=10, relu, in_channels='800', name='dense_1') - ... ) - - Forwarding through this network - - >>> data = np.random.normal(size=[16, 784]).astype(np.float32) - >>> outputs_d = M_dynamic(data) - >>> outputs_s = M_static(data) - - Save and load weights - - >>> M_static.save_weights('./model_weights.h5') - >>> M_static.load_weights('./model_weights.h5') - - Save and load the model - - >>> M_static.save('./model.h5') - >>> M = Model.load('./model.h5') - - Convert model to layer - - >>> M_layer = M_static.as_layer() - +from tensorlayer import logging +import tensorlayer as tl +from tensorlayer.layers.core import Module +import numpy as np +import os +import time + +if tl.BACKEND == 'tensorflow': + import tensorflow as tf +if tl.BACKEND == 'mindspore': + import mindspore as ms + from mindspore.ops import composite + from mindspore.ops import operations as P + from mindspore.ops import functional as F + # from mindspore.parallel._utils import (_get_device_num, _get_mirror_mean, _get_parallel_mode) + # from mindspore.train.parallel_utils import ParallelMode + from mindspore.nn.wrap import DistributedGradReducer + from mindspore.common import ParameterTuple + + +class Model: """ - - @property - def inputs(self): - return self._inputs - - @property - def outputs(self): - return self._outputs - - def __init__(self, inputs=None, outputs=None, name=None): - """ - Initializing the Model. 
- - Parameters - ---------- - inputs : Tensor or list of tensors - Input tensor(s), which must come from tl.layers.Input() - outputs : Tensor or list of tensors - Output tensor(s), which must be the output(s) of some TL layers - name : str or None - Name for this network - """ - # Auto naming if the name is not given - self._NameNone = False - global _global_model_name_dict - global _global_model_name_set - if name is None: - self._NameNone = True - prefix = self.__class__.__name__.lower() - if _global_model_name_dict.get(prefix) is not None: - _global_model_name_dict[prefix] += 1 - name = prefix + '_' + str(_global_model_name_dict[prefix]) - else: - _global_model_name_dict[prefix] = 0 - name = prefix - while name in _global_model_name_set: - _global_model_name_dict[prefix] += 1 - name = prefix + '_' + str(_global_model_name_dict[prefix]) - _global_model_name_set.add(name) - else: - if name in _global_model_name_set: - raise ValueError( - 'Model name \'%s\' has already been used by another model. Please change the model name.' % name - ) - _global_model_name_set.add(name) - _global_model_name_dict[name] = 0 - - # Model properties - self.name = name - - # Model state: train or test - self.is_train = None - - # Model weights - self._all_weights = None - self._trainable_weights = None - self._nontrainable_weights = None - - # Model args of all layers, ordered by all_layers - self._config = None - - # Model inputs and outputs - # TODO: note that in dynamic network, inputs and outputs are both None, may cause problem, test needed - self._inputs = inputs - self._outputs = outputs - - # Model converted into a Layer - self._model_layer = None - - # Layer Node status - self._nodes_fixed = False - - # Model layers - self._all_layers = None - - if inputs is None and outputs is None: - pass - - else: - # check type of inputs and outputs - check_order = ['inputs', 'outputs'] - for co, check_argu in enumerate([inputs, outputs]): - if isinstance(check_argu, - (tf.Tensor, tf.SparseTensor, tf.Variable)) or tf_ops.is_dense_tensor_like(check_argu): - pass - elif isinstance(check_argu, list): - if len(check_argu) == 0: - raise ValueError( - "The argument `%s` is detected as an empty list. " % check_order[co] + - "It should be either Tensor or a list of Tensor." - ) - for idx in range(len(check_argu)): - if not isinstance(check_argu[idx], - (tf.Tensor, tf.SparseTensor, tf.Variable)) or not tf_ops.is_dense_tensor_like( - check_argu[idx]): - raise TypeError( - "The argument `%s` should be either Tensor or a list of Tensor " % (check_order[co]) + - "but the %s[%d] is detected as %s" % (check_order[co], idx, type(check_argu[idx])) - ) - else: - raise TypeError( - "The argument `%s` should be either Tensor or a list of Tensor but received %s" % - (check_order[co], type(check_argu)) - ) - - if not _check_tl_layer_tensors(inputs): - raise TypeError( - "The argument `inputs` should be either Tensor or a list of Tensor " - "that come from TensorLayer's Input layer: tl.layers.Input(shape). " - ) - if not _check_tl_layer_tensors(outputs): - raise TypeError( - "The argument `outputs` should be either Tensor or a list of Tensor " - "that is/are outputs from some TensorLayer's layers, e.g. tl.layers.Dense, tl.layers.Conv2d." - ) - - # build network graph - self._node_by_depth, self._all_layers = self._construct_graph() - - self._fix_nodes_for_layers() - - def __call__(self, inputs, is_train=None, **kwargs): - """Forward input tensors through this network by calling. 
- - Parameters - ---------- - inputs : Tensor or list of Tensors, numpy.ndarray of list of numpy.ndarray - Inputs for network forwarding - is_train : boolean - Network's mode for this time forwarding. If 'is_train' == True, this network is set as training mode. - If 'is_train' == False, this network is set as evaluation mode - kwargs : - For other keyword-only arguments. - - """ - - self._check_mode(is_train) - - # FIXME: this may cause inefficiency, this is used to check if every layer is built - self.all_layers - - # fix LayerNodes when first calling - if self._nodes_fixed is False: - self._fix_nodes_for_layers() - - # set training / inference mode if necessary - if is_train is not None: - self._set_mode_for_layers(is_train) - - # if self._input is a list, then it must be a static network - if isinstance(self._inputs, list): - if not isinstance(inputs, list): - raise ValueError("The argument `inputs` should be a list of values but detected as %s." % type(inputs)) - elif len(inputs) != len(self._inputs): - raise ValueError( - "The argument `inputs` should be a list with len=%d but detected as len=%d." % - (len(self._inputs), len(inputs)) - ) - - # convert inputs to tensor if it is originally not - # FIXME: not sure convert_to_tensor here or ask user to do it - if isinstance(inputs, list): - for idx in range(len(inputs)): - inputs[idx] = tf.convert_to_tensor(inputs[idx]) - else: - inputs = tf.convert_to_tensor(inputs) - - return self.forward(inputs, **kwargs) - - @abstractmethod - def forward(self, *inputs, **kwargs): - """Network forwarding given input tensors - - Parameters - ---------- - inputs : Tensor or list of Tensors - input tensor(s) - kwargs : - For other keyword-only arguments. - - Returns - ------- - output tensor(s) : Tensor or list of Tensor(s) - - """ - # FIXME: currently using self._outputs to judge static network or dynamic network - if self._outputs is None: - raise ValueError( - "Outputs not defined. Please define inputs and outputs when the model is created. Or overwrite forward() function." 
- ) - - memory = dict() - - # get each layer's output by going through the graph in depth order - for depth, nodes in enumerate(self._node_by_depth): - if depth == 0: - if isinstance(self.inputs, list): - assert len(inputs[0]) == len(nodes) - for idx, node in enumerate(nodes): - memory[node.name] = node(inputs[0][idx]) - else: - memory[nodes[0].name] = nodes[0](inputs[0]) - else: - for node in nodes: - in_nodes = node.in_nodes - in_tensors_idxes = node.in_tensors_idxes - if len(in_nodes) == 1: - node_input = memory[in_nodes[0].name][in_tensors_idxes[0]] - else: - node_input = [memory[inode.name][idx] for inode, idx in zip(in_nodes, in_tensors_idxes)] - memory[node.name] = node(node_input) - - if not isinstance(self._outputs, list): - return memory[self._outputs._info[0].name][self._outputs._info[1]] - else: - return [memory[tensor._info[0].name][tensor._info[1]] for tensor in self._outputs] - - @property - def all_layers(self): - """Return all layers of this network in a list.""" - if self._all_layers is not None: - return self._all_layers - - if self._inputs is not None and self._outputs is not None: - # static model - return self._all_layers - else: - # dynamic model - self._all_layers = list() - attr_list = [attr for attr in dir(self) if attr[:2] != "__"] - attr_list.remove("all_weights") - attr_list.remove("trainable_weights") - attr_list.remove("nontrainable_weights") - attr_list.remove("_all_weights") - attr_list.remove("_trainable_weights") - attr_list.remove("_nontrainable_weights") - attr_list.remove("all_layers") - attr_list.remove("_all_layers") - attr_list.remove("n_weights") - for idx, attr in enumerate(attr_list): - try: - if isinstance(getattr(self, attr), Layer): - nowlayer = getattr(self, attr) - if not nowlayer._built: - raise AttributeError("Layer %s not built yet." % repr(nowlayer)) - self._all_layers.append(nowlayer) - elif isinstance(getattr(self, attr), Model): - nowmodel = getattr(self, attr) - self._all_layers.append(nowmodel) - elif isinstance(getattr(self, attr), list): - self._all_layers.extend(_add_list_to_all_layers(getattr(self, attr))) - # TODO: define customised exception for TL - except AttributeError as e: - raise e - except Exception: - pass - - # check layer name uniqueness - local_layer_name_dict = set() - for layer in self._all_layers: - if layer.name in local_layer_name_dict: - raise ValueError( - 'Layer name \'%s\' has already been used by another layer. Please change the layer name.' 
% - layer.name - ) - else: - local_layer_name_dict.add(layer.name) - return self._all_layers - - @property - def trainable_weights(self): - """Return trainable weights of this network in a list.""" - if self._trainable_weights is not None and len(self._trainable_weights) > 0: - # self._trainable_weights already extracted, so do nothing - pass - else: - self._trainable_weights = [] - for layer in self.all_layers: - if layer.trainable_weights is not None: - self._trainable_weights.extend(layer.trainable_weights) - - return self._trainable_weights.copy() - - @property - def nontrainable_weights(self): - """Return nontrainable weights of this network in a list.""" - if self._nontrainable_weights is not None and len(self._nontrainable_weights) > 0: - # self._nontrainable_weights already extracted, so do nothing - pass - else: - self._nontrainable_weights = [] - for layer in self.all_layers: - if layer.nontrainable_weights is not None: - self._nontrainable_weights.extend(layer.nontrainable_weights) - - return self._nontrainable_weights.copy() - - @property - def all_weights(self): - """Return all weights of this network in a list.""" - if self._all_weights is not None and len(self._all_weights) > 0: - # self._all_weights already extracted, so do nothing - pass - else: - self._all_weights = [] - for layer in self.all_layers: - if layer.all_weights is not None: - self._all_weights.extend(layer.all_weights) - - return self._all_weights.copy() - - @property - def n_weights(self): - """Return the number of weights (parameters) in this network.""" - n_weights = 0 - for i, w in enumerate(self.all_weights): - n = 1 - # for s in p.eval().shape: - for s in w.get_shape(): - try: - s = int(s) - except: - s = 1 - if s: - n = n * s - n_weights = n_weights + n - # print("num of weights (parameters) %d" % n_weights) - return n_weights - - @property - def config(self): - if self._config is not None and len(self._config) > 0: - return self._config - else: - # _config = [] - _config = {} - if self._NameNone is True: - _config.update({"name": None}) - else: - _config.update({"name": self.name}) - version_info = { - "tensorlayer_version": tl.__version__, - "backend": "tensorflow", - "backend_version": tf.__version__, - "training_device": "gpu", - "save_date": None, - } - _config["version_info"] = version_info - # if self.outputs is None: - # raise RuntimeError( - # "Dynamic mode does not support config yet." - # ) - model_architecture = [] - for layer in self.all_layers: - model_architecture.append(layer.config) - _config["model_architecture"] = model_architecture - if self.inputs is not None: - if not isinstance(self.inputs, list): - _config.update({"inputs": self.inputs._info[0].name}) - else: - config_inputs = [] - for config_input in self.inputs: - config_inputs.append(config_input._info[0].name) - _config.update({"inputs": config_inputs}) - if self.outputs is not None: - if not isinstance(self.outputs, list): - _config.update({"outputs": self.outputs._info[0].name}) - else: - config_outputs = [] - for config_output in self.outputs: - config_outputs.append(config_output._info[0].name) - _config.update({"outputs": config_outputs}) - if self._nodes_fixed or self.outputs is None: - self._config = _config - - return _config - - def train(self): - """Set this network in training mode. After calling this method, - all layers in network are in training mode, in particular, BatchNorm, Dropout, etc. - - Examples - -------- + High-Level API for Training or Testing. 
+ + `Model` groups layers into an object with training and inference features. + + Args: + network : The training or testing network. + loss_fn : Objective function, if loss_fn is None, the + network should contain the logic of loss and grads calculation, and the logic + of parallel if needed. Default: None. + optimizer : Optimizer for updating the weights. Default: None. + metrics (Union[dict, set]): Dict or set of metrics to be evaluated by the model during + training and testing. eg: {'accuracy', 'recall'}. Default: None. + eval_network (Cell): Network for evaluation. If not defined, `network` and `loss_fn` would be wrapped as + `eval_network`. Default: None. + eval_indexes (list): In case of defining the `eval_network`, if `eval_indexes` is None, all outputs of + `eval_network` would be passed to metrics, otherwise `eval_indexes` must contain three + elements, representing the positions of loss value, predict value and label, the loss + value would be passed to `Loss` metric, predict value and label would be passed to other + metric. Default: None. + amp_level (str): Option for argument `level` in `mindspore.amp.build_train_network`, level for mixed + precision training. Supports [O0, O2, O3]. Default: "O0". + + - O0: Do not change. + - O2: Cast network to float16, keep batchnorm run in float32, using dynamic loss scale. + - O3: Cast network to float16, with additional property 'keep_batchnorm_fp32=False'. + + O2 is recommended on GPU, O3 is recommended on Ascend. + + loss_scale_manager (Union[None, LossScaleManager]): If None, not scale the loss, or else + scale the loss by LossScaleManager. If it is set, overwrite the level setting. It's a eyword argument. + e.g. Use `loss_scale_manager=None` to set the value. + keep_batchnorm_fp32 (bool): Keep Batchnorm run in `float32`. If set, overwrite the level setting. Default: True. + + Examples: >>> import tensorlayer as tl - >>> net = tl.models.vgg16() - >>> net.train() - - """ - if self.is_train !=True: - self.is_train = True - self._set_mode_for_layers(True) - - def eval(self): - """Set this network in evaluation mode. After calling this method, - all layers in network are in evaluation mode, in particular, BatchNorm, Dropout, etc. - - Examples - -------- - >>> import tensorlayer as tl - >>> net = tl.models.vgg16() - >>> net.eval() - # do evaluation - - """ - if self.is_train != False: - self.is_train = False - self._set_mode_for_layers(False) - - def test(self): - """Set this network in evaluation mode.""" - self.eval() - - def infer(self): - """Set this network in evaluation mode.""" - self.eval() - - def as_layer(self): - """Return this network as a ModelLayer so that it can be integrated into another Model. 
- - Examples - -------- - >>> from tensorlayer.layers import Input, Dense, Dropout - >>> from tensorlayer.models import Model - >>> ni = Input([None, 784]) - >>> nn = Dense(n_units=800, act=tf.nn.relu)(ni) - >>> nn = Dropout(keep=0.8)(nn) - >>> nn = Dense(n_units=10, act=tf.nn.relu)(nn) - >>> M_hidden = Model(inputs=ni, outputs=nn, name="mlp").as_layer() - >>> nn = M_hidden(ni) # use previously constructed model as layer - >>> nn = Dropout(keep=0.8)(nn) - >>> nn = Dense(n_units=10, act=tf.nn.relu)(nn) - >>> M_full = Model(inputs=ni, outputs=nn, name="mlp") - - """ - if self._outputs is None: - raise AttributeError("Dynamic network cannot be converted to Layer.") - - if self._model_layer is None: - self._model_layer = ModelLayer(self) - - return self._model_layer - - def _check_mode(self, is_train): - """Check whether this network is in a given mode. - - Parameters - ---------- - is_train : boolean - Network's mode. True means training mode while False means evaluation mode. + >>> class Net(Module): + >>> def __init__(self): + >>> super(Net, self).__init__() + >>> self.conv = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), in_channels=5, name='conv2d') + >>> self.bn = tl.layers.BatchNorm2d(num_features=32, act=tl.ReLU) + >>> self.flatten = tl.layers.Flatten() + >>> self.fc = tl.layers.Dense(n_units=12, in_channels=32*224*224) # padding=0 + >>> + >>> def construct(self, x): + >>> x = self.conv(x) + >>> x = self.bn(x) + >>> x = self.flatten(x) + >>> out = self.fc(x) + >>> return out + >>> + >>> net = Net() + >>> loss = tl.cost.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) + >>> optim = tl.layers.Momentum(params=net.trainable_weights, learning_rate=0.1, momentum=0.9) + >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None) + >>> dataset = get_dataset() + >>> model.train(2, dataset) + """ - """ - # contradiction test - if is_train is None and self.is_train is None: - raise ValueError( - "Training / inference mode not defined. Argument `is_train` should be set as True / False. Otherwise please use `Model.train()` / `Model.eval()` to switch the mode." + def __init__( + self, network, loss_fn=None, optimizer=None, metrics=None, eval_network=None, eval_indexes=None, amp_level="O0", + **kwargs + ): + self.network = network + self.loss_fn = loss_fn + self.optimizer = optimizer + self.metrics = metrics + self.all_weights = network.all_weights + self.train_weights = self.network.trainable_weights + + def train(self, n_epoch, train_dataset=None, test_dataset=False, print_train_batch=False, print_freq=5): + if not isinstance(train_dataset, Iterable): + raise Exception("Expected type in (train_dataset, Iterable), but got {}.".format(type(train_dataset))) + + if tl.BACKEND == 'tensorflow': + self.tf_train( + n_epoch=n_epoch, train_dataset=train_dataset, network=self.network, loss_fn=self.loss_fn, + train_weights=self.train_weights, optimizer=self.optimizer, metrics=self.metrics, + print_train_batch=print_train_batch, print_freq=print_freq, test_dataset=test_dataset ) - elif is_train is not None and self.is_train is not None: - if is_train == self.is_train: - logging.warning( - "Training / inference mode redefined redundantly. Please EITHER use the argument `is_train` OR `Model.train()` / `Model.eval()` to define the mode." - ) - else: - raise AttributeError( - "Training / inference mode mismatch. The argument `is_train` is set as %s, " % is_train + - "but the mode is currently set as %s. 
" % - ('Training by Model.train()' if self.is_train else 'Inference by Model.eval()') + - "Please EITHER use the argument `is_train` OR `Model.train()` / `Model.eval()` to define the mode." - ) - - def _set_mode_for_layers(self, is_train): - """Set all layers of this network to a given mode. - - Parameters - ---------- - is_train : boolean - Network's mode. True means training mode while False means evaluation mode. - - """ - for layer in self.all_layers: - if isinstance(layer, Model): - layer.is_train = is_train - layer._set_mode_for_layers(is_train) - - def _fix_nodes_for_layers(self): - """Fix each Layer's LayerNode to stop growing, see LayerNode for more.""" - for layer in self.all_layers: - layer._fix_nodes_for_layers() - self._nodes_fixed = True - - def __setattr__(self, key, value): - if isinstance(value, Layer): - if value._built is False: - raise AttributeError( - "The registered layer `{}` should be built in advance. " - "Do you forget to pass the keyword argument 'in_channels'? ".format(value.name) - ) - super().__setattr__(key, value) - - def __repr__(self): - # tmpstr = self.__class__.__name__ + '(\n' - tmpstr = self.name + '(\n' - for idx, layer in enumerate(self.all_layers): - modstr = layer.__repr__() - modstr = _addindent(modstr, 2) - tmpstr = tmpstr + ' (' + layer.name + '): ' + modstr + '\n' - tmpstr = tmpstr + ')' - return tmpstr - - ## raise Exceptions for old version codes - def print_all_layers(self): - raise Exception("please change net.print_all_layers --> print(net)") - - def count_params(self, **kwargs): - raise Exception("please change count_params --> count_weights") - - def print_params(self, **kwargs): - raise Exception("please change print_params --> print_weights") - - @property - def all_params(self): - raise Exception("please change all_params --> weights") - - @property - def all_drop(self): - raise Exception("all_drop is deprecated") - - def get_layer(self, name=None, index=None): - """Network forwarding given input tensors - - Parameters - ---------- - name : str or None - Name of the requested layer. Default None. - index : int or None - Index of the requested layer. Default None. - - Returns - ------- - layer : The requested layer - - Notes - ----- - Either a layer name or a layer index should be given. - - """ - if index is not None: - if len(self.all_layers) <= index: - raise ValueError( - 'model only has ' + str(len(self.all_layers)) + ' layers, but ' + str(index) + - '-th layer is requested.' - ) - else: - return self.all_layers[index] - elif name is not None: - for layer in self.all_layers: - if layer.name == name: - return layer - raise ValueError('Model has no layer named ' + name + '.') - else: - raise ValueError('Either a layer name or a layer index should be given.') - - def _construct_graph(self): - """construct computation graph for static model using LayerNode object""" - all_layers = [] - node_by_depth = [] # [[node0, node1], [node2, node3], ...] 
- - input_tensors_list = self.inputs if isinstance(self.inputs, list) else [self.inputs] - - queue_node = Queue() - - # BFS to visit all nodes that should be involved in the computation graph - output_tensors_list = self.outputs if isinstance(self.outputs, list) else [self.outputs] - output_nodes = [tensor._info[0] for tensor in output_tensors_list] - - visited_node_names = set() - for out_node in output_nodes: - if out_node.visited: - continue - queue_node.put(out_node) - - while not queue_node.empty(): - cur_node = queue_node.get() - in_nodes = cur_node.in_nodes - - for node in in_nodes: - node.out_nodes.append(cur_node) - if not node.visited: - queue_node.put(node) - node.visited = True - if node.name not in visited_node_names: - visited_node_names.add(node.name) - # else have multiple layers with the same name - else: - raise ValueError( - 'Layer name \'%s\' has already been used by another layer. Please change the layer name.' - % node.layer.name - ) - - # construct the computation graph in top-sort order - cur_depth = [tensor._info[0] for tensor in input_tensors_list] - next_depth = [] - indegrees = {} - - visited_layer_names = [] - while not len(cur_depth) == 0: - node_by_depth.append(cur_depth) - for node in cur_depth: - if node.layer.name not in visited_layer_names: - all_layers.append(node.layer) - visited_layer_names.append(node.layer.name) - for out_node in node.out_nodes: - if out_node.name not in indegrees.keys(): - indegrees[out_node.name] = len(out_node.in_nodes) - indegrees[out_node.name] -= 1 - if indegrees[out_node.name] == 0: - next_depth.append(out_node) - - cur_depth = next_depth - next_depth = [] - - return node_by_depth, all_layers - - def release_memory(self): - ''' - WARNING: This function should be called with great caution. - - Release objects that MAY NOT be necessary such as layer.outputs (if in a tf.GradientTape() scope). - For each layer in the model, layer.inputs and layer.outputs will be set as None but not deleted. - - A void function. - - Examples - -------- - >>> import tensorlayer as tl - >>> vgg = tl.models.vgg16() - ... # training preparation - ... # ... - ... # back propagation - >>> with tf.GradientTape() as tape: - >>> _logits = vgg(x_batch) - >>> ## compute loss and update model - >>> _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') - >>> ## release unnecessary objects (layer.inputs, layer.outputs) - >>> ## this function should be called with great caution - >>> ## within the scope of tf.GradientTape(), using this function should be fine - >>> vgg.release_memory() - - ''' - for layer in self.all_layers: - layer._release_memory() - - def save(self, filepath, save_weights=True, customized_data=None): - """ - Save model into a given file. - This function save can save both the architecture of neural networks and weights (optional). - WARNING: If the model contains Lambda / ElementwiseLambda layer, please check the documentation of Lambda / ElementwiseLambda layer and find out the cases that have / have not been supported by Model.save(). - - Parameters - ---------- - filepath : str - Filename into which the model will be saved. - save_weights : bool - Whether to save model weights. - customized_data : dict - The user customized meta data. 
- - Examples - -------- - >>> net = tl.models.vgg16() - >>> net.save('./model.h5', save_weights=True) - >>> new_net = Model.load('./model.h5', load_weights=True) - - """ - # TODO: support saving LambdaLayer that includes parametric self defined function with outside variables - if self.outputs is None: - raise RuntimeError( - "Model save() not support dynamic mode yet.\nHint: you can use Model save_weights() to save the weights in dynamic mode." + elif tl.BACKEND == 'mindspore': + self.ms_train( + n_epoch=n_epoch, train_dataset=train_dataset, network=self.network, loss_fn=self.loss_fn, + train_weights=self.train_weights, optimizer=self.optimizer, metrics=self.metrics, + print_train_batch=print_train_batch, print_freq=print_freq, test_dataset=test_dataset ) - utils.save_hdf5_graph( - network=self, filepath=filepath, save_weights=save_weights, customized_data=customized_data - ) - - @staticmethod - def load(filepath, load_weights=True): - """ - Load model from a given file, which should be previously saved by Model.save(). - This function load can load both the architecture of neural networks and weights (optional, and needs to be saved in Model.save()). - When a model is loaded by this function load, there is no need to reimplement or declare the architecture of the model explicitly in code. - WARNING: If the model contains Lambda / ElementwiseLambda layer, please check the documentation of Lambda / ElementwiseLambda layer and find out the cases that have / have not been supported by Model.load(). - - Parameters - ---------- - filepath : str - Filename from which the model will be loaded. - load_weights : bool - Whether to load model weights. - Examples - -------- - >>> net = tl.models.vgg16() - >>> net.save('./model.h5', save_weights=True) - >>> new_net = Model.load('./model.h5', load_weights=True) - """ - # TODO: support loading LambdaLayer that includes parametric self defined function with outside variables - M = utils.load_hdf5_graph(filepath=filepath, load_weights=load_weights) - return M + def eval(self, test_dataset): + self.network.eval() + test_loss, test_acc, n_iter = 0, 0, 0 + for X_batch, y_batch in test_dataset: + _logits = self.network(X_batch) + test_loss += self.loss_fn(_logits, y_batch) + if self.metrics: + test_acc += self.metrics(_logits, y_batch) + else: + test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) + n_iter += 1 + print(" test loss: {}".format(test_loss / n_iter)) + print(" test acc: {}".format(test_acc / n_iter)) - def save_weights(self, filepath, format=None): - """Input filepath, save model weights into a file of given format. + def save_weights(self, file_path, format=None): + """Input file_path, save model weights into a file of given format. Use self.load_weights() to restore. Parameters ---------- - filepath : str + file_path : str Filename to which the model weights will be saved. format : str or None Saved file format. Value should be None, 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now. - 1) If this is set to None, then the postfix of filepath will be used to decide saved format. + 1) If this is set to None, then the postfix of file_path will be used to decide saved format. If the postfix is not in ['h5', 'hdf5', 'npz', 'ckpt'], then file will be saved in hdf5 format by default. 2) 'hdf5' will save model weights name in a list and each layer has its weights stored in a group of the hdf5 file. 
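A minimal usage sketch of the weight I/O described above (the `net`, `loss`, and `optim` objects are assumed to be defined as in the `Model` docstring example; with format=None the format would be inferred from the file postfix):

    model = Model(net, loss_fn=loss, optimizer=optim)
    model.save_weights('./model.npz', format='npz_dict')             # explicit format overrides the '.npz' postfix
    model.load_weights('./model.npz', format='npz_dict', skip=True)  # skip weights whose names do not match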
@@ -859,34 +149,36 @@ def save_weights(self, filepath, format=None): Examples -------- 1) Save model weights in hdf5 format by default. - >>> net = tl.models.vgg16() + >>> net = vgg16() >>> net.save_weights('./model.h5') ... >>> net.load_weights('./model.h5') 2) Save model weights in npz/npz_dict format - >>> net = tl.models.vgg16() + >>> net = vgg16() >>> net.save_weights('./model.npz') >>> net.save_weights('./model.npz', format='npz_dict') """ + + # self.all_weights = self.network.all_weights if self.all_weights is None or len(self.all_weights) == 0: logging.warning("Model contains no weights or layers haven't been built, nothing will be saved") return if format is None: - postfix = filepath.split('.')[-1] + postfix = file_path.split('.')[-1] if postfix in ['h5', 'hdf5', 'npz', 'ckpt']: format = postfix else: format = 'hdf5' if format == 'hdf5' or format == 'h5': - utils.save_weights_to_hdf5(filepath, self) + utils.save_weights_to_hdf5(file_path, self) elif format == 'npz': - utils.save_npz(self.all_weights, filepath) + utils.save_npz(self.all_weights, file_path) elif format == 'npz_dict': - utils.save_npz_dict(self.all_weights, filepath) + utils.save_npz_dict(self.all_weights, file_path) elif format == 'ckpt': # TODO: enable this when tf save ckpt is enabled raise NotImplementedError("ckpt load/save is not supported now.") @@ -896,15 +188,15 @@ def save_weights(self, filepath, format=None): "Other format is not supported now." ) - def load_weights(self, filepath, format=None, in_order=True, skip=False): + def load_weights(self, file_path, format=None, in_order=True, skip=False): """Load model weights from a given file, which should be previously saved by self.save_weights(). Parameters ---------- - filepath : str + file_path : str Filename from which the model weights will be loaded. format : str or None - If not specified (None), the postfix of the filepath will be used to decide its format. If specified, + If not specified (None), the postfix of the file_path will be used to decide its format. If specified, value should be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now. In addition, it should be the same format when you saved the file using self.save_weights(). Default is None. @@ -943,23 +235,23 @@ def load_weights(self, filepath, format=None, in_order=True, skip=False): 'in_order' argument will be ignored. """ - if not os.path.exists(filepath): - raise FileNotFoundError("file {} doesn't exist.".format(filepath)) + if not os.path.exists(file_path): + raise FileNotFoundError("file {} doesn't exist.".format(file_path)) if format is None: - format = filepath.split('.')[-1] + format = file_path.split('.')[-1] if format == 'hdf5' or format == 'h5': if skip ==True or in_order == False: # load by weights name - utils.load_hdf5_to_weights(filepath, self, skip) + utils.load_hdf5_to_weights(file_path, self, skip) else: # load in order - utils.load_hdf5_to_weights_in_order(filepath, self) + utils.load_hdf5_to_weights_in_order(file_path, self) elif format == 'npz': - utils.load_and_assign_npz(filepath, self) + utils.load_and_assign_npz(file_path, self) elif format == 'npz_dict': - utils.load_and_assign_npz_dict(filepath, self, skip) + utils.load_and_assign_npz_dict(file_path, self, skip) elif format == 'ckpt': # TODO: enable this when tf save ckpt is enabled raise NotImplementedError("ckpt load/save is not supported now.") @@ -969,51 +261,134 @@ def load_weights(self, filepath, format=None, in_order=True, skip=False): "Other format is not supported now." 
) - # TODO: not supported now - # def save_ckpt(self, sess=None, mode_name='model.ckpt', save_dir='checkpoint', global_step=None, printable=False): - # # TODO: Documentation pending - # """""" - # if not os.path.exists(save_dir): - # raise FileNotFoundError("Save directory {} doesn't exist.".format(save_dir)) - # utils.save_ckpt(sess, mode_name, save_dir, self.weights, global_step, printable) - # - # def load_ckpt(self, sess=None, mode_name='model.ckpt', save_dir='checkpoint', is_latest=True, printable=False): - # # TODO: Documentation pending - # """""" - # utils.load_ckpt(sess, mode_name, save_dir, self.weights, is_latest, printable) - - -def _addindent(s_, numSpaces): - s = s_.split('\n') - # don't do anything for single-line stuff - if len(s) == 1: - return s_ - first = s.pop(0) - s = [(numSpaces * ' ') + line for line in s] - s = '\n'.join(s) - s = first + '\n' + s - return s - - -def _check_tl_layer_tensors(tensors): - if not isinstance(tensors, list): - return hasattr(tensors, '_info') - else: - for t in tensors: - if not hasattr(t, '_info'): - return False - return True - - -def _add_list_to_all_layers(list_member): - temp_all_layers = list() - for component in list_member: - if isinstance(component, Layer): - temp_all_layers.append(component) - if not component._built: - raise AttributeError("Layer %s not built yet." % repr(component)) - elif isinstance(component, Model): - temp_all_layers.append(component) - elif isinstance(component, list): - temp_all_layers.extend(_add_list_to_all_layers(component)) - return temp_all_layers + def tf_train( + self, n_epoch, train_dataset, network, loss_fn, train_weights, optimizer, metrics, print_train_batch, + print_freq, test_dataset + ): + for epoch in range(n_epoch): + start_time = time.time() + + train_loss, train_acc, n_iter = 0, 0, 0 + for X_batch, y_batch in train_dataset: + network.set_train() + + with tf.GradientTape() as tape: + # compute outputs + _logits = network(X_batch) + # compute loss and update model + _loss_ce = loss_fn(_logits, y_batch) + + grad = tape.gradient(_loss_ce, train_weights) + optimizer.apply_gradients(zip(grad, train_weights)) + + train_loss += _loss_ce + if metrics: + train_acc += metrics(_logits, y_batch) + else: + train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) + n_iter += 1 + + if print_train_batch: + print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) + print(" train loss: {}".format(train_loss / n_iter)) + print(" train acc: {}".format(train_acc / n_iter)) + + if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: + print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) + print(" train loss: {}".format(train_loss / n_iter)) + print(" train acc: {}".format(train_acc / n_iter)) + + if test_dataset: + # use training and evaluation sets to evaluate the model every print_freq epoch + if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: + network.eval() + val_loss, val_acc, n_iter = 0, 0, 0 + for X_batch, y_batch in test_dataset: + _logits = network(X_batch) # is_train=False, disable dropout + val_loss += loss_fn(_logits, y_batch, name='eval_loss') + if metrics: + val_acc += metrics(_logits, y_batch) + else: + val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) + n_iter += 1 + print(" val loss: {}".format(val_loss / n_iter)) + print(" val acc: {}".format(val_acc / n_iter)) + + def ms_train( + self, n_epoch, train_dataset, network, loss_fn, train_weights, optimizer, metrics, print_train_batch, + print_freq, test_dataset + ): + 
net_with_criterion = WithLoss(network, loss_fn)
+        train_network = GradWrap(net_with_criterion)
+        train_network.set_train()
+        for epoch in range(n_epoch):
+            start_time = time.time()
+            train_loss, train_acc, n_iter = 0, 0, 0
+            for X_batch, y_batch in train_dataset:
+                X_batch = ms.Tensor(X_batch.numpy(), dtype=ms.float32)
+                y_batch = ms.Tensor(y_batch.numpy(), dtype=ms.int32)
+                output = network(X_batch)
+                loss_output = loss_fn(output, y_batch)
+                grads = train_network(X_batch, y_batch)
+                success = optimizer.apply_gradients(zip(grads, train_weights))
+                loss = loss_output.asnumpy()
+                train_loss += loss
+                if metrics:
+                    train_acc += metrics(output, y_batch)
+                else:
+                    train_acc += np.mean((P.Equal()(P.Argmax(axis=1)(output), y_batch).asnumpy()))
+                n_iter += 1
+
+                if print_train_batch:
+                    print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
+                    print(" train loss: {}".format(train_loss / n_iter))
+                    print(" train acc: {}".format(train_acc / n_iter))
+
+            if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
+                print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
+                print(" train loss: {}".format(train_loss / n_iter))
+                print(" train acc: {}".format(train_acc / n_iter))
+
+            if test_dataset:
+                # use training and evaluation sets to evaluate the model every print_freq epoch
+                if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
+                    network.eval()
+                    val_loss, val_acc, n_iter = 0, 0, 0
+                    for X_batch, y_batch in test_dataset:
+                        _logits = network(X_batch)
+                        val_loss += loss_fn(_logits, y_batch, name='eval_loss')
+                        if metrics:
+                            val_acc += metrics(_logits, y_batch)
+                        else:
+                            val_acc += np.mean((P.Equal()(P.Argmax(axis=1)(_logits), y_batch).asnumpy()))
+                        n_iter += 1
+                    print(" val loss: {}".format(val_loss / n_iter))
+                    print(" val acc: {}".format(val_acc / n_iter))
+
+
+class WithLoss(Module):
+
+    def __init__(self, backbone, loss_fn):
+        super(WithLoss, self).__init__()
+        self._backbone = backbone
+        self._loss_fn = loss_fn
+
+    def construct(self, data, label):
+        out = self._backbone(data)
+        return self._loss_fn(out, label)
+
+    @property
+    def backbone_network(self):
+        return self._backbone
+
+
+class GradWrap(Module):
+    """ GradWrap definition """
+
+    def __init__(self, network):
+        super(GradWrap, self).__init__(auto_prefix=False)
+        self.network = network
+        self.weights = ParameterTuple(network.trainable_weights)
+
+    def forward(self, x, label):
+        return composite.GradOperation(get_by_list=True)(self.network, self.weights)(x, label)
diff --git a/tensorlayer/models/imagenet_class_index.json b/tensorlayer/models/imagenet_class_index.json
deleted file mode 100644
index 5fe0dfefc..000000000
--- a/tensorlayer/models/imagenet_class_index.json
+++ /dev/null
@@ -1 +0,0 @@
-{"0": ["n01440764", "tench"], "1": ["n01443537", "goldfish"], "2": ["n01484850", "great_white_shark"], "3": ["n01491361", "tiger_shark"], "4": ["n01494475", "hammerhead"], "5": ["n01496331", "electric_ray"], "6": ["n01498041", "stingray"], "7": ["n01514668", "cock"], "8": ["n01514859", "hen"], "9": ["n01518878", "ostrich"], "10": ["n01530575", "brambling"], "11": ["n01531178", "goldfinch"], "12": ["n01532829", "house_finch"], "13": ["n01534433", "junco"], "14": ["n01537544", "indigo_bunting"], "15": ["n01558993", "robin"], "16": ["n01560419", "bulbul"], "17": ["n01580077", "jay"], "18": ["n01582220", "magpie"], "19": ["n01592084", "chickadee"], "20": ["n01601694", "water_ouzel"], "21": ["n01608432", "kite"], "22": ["n01614925", "bald_eagle"], "23": ["n01616318", "vulture"], "24": ["n01622779",
"great_grey_owl"], "25": ["n01629819", "European_fire_salamander"], "26": ["n01630670", "common_newt"], "27": ["n01631663", "eft"], "28": ["n01632458", "spotted_salamander"], "29": ["n01632777", "axolotl"], "30": ["n01641577", "bullfrog"], "31": ["n01644373", "tree_frog"], "32": ["n01644900", "tailed_frog"], "33": ["n01664065", "loggerhead"], "34": ["n01665541", "leatherback_turtle"], "35": ["n01667114", "mud_turtle"], "36": ["n01667778", "terrapin"], "37": ["n01669191", "box_turtle"], "38": ["n01675722", "banded_gecko"], "39": ["n01677366", "common_iguana"], "40": ["n01682714", "American_chameleon"], "41": ["n01685808", "whiptail"], "42": ["n01687978", "agama"], "43": ["n01688243", "frilled_lizard"], "44": ["n01689811", "alligator_lizard"], "45": ["n01692333", "Gila_monster"], "46": ["n01693334", "green_lizard"], "47": ["n01694178", "African_chameleon"], "48": ["n01695060", "Komodo_dragon"], "49": ["n01697457", "African_crocodile"], "50": ["n01698640", "American_alligator"], "51": ["n01704323", "triceratops"], "52": ["n01728572", "thunder_snake"], "53": ["n01728920", "ringneck_snake"], "54": ["n01729322", "hognose_snake"], "55": ["n01729977", "green_snake"], "56": ["n01734418", "king_snake"], "57": ["n01735189", "garter_snake"], "58": ["n01737021", "water_snake"], "59": ["n01739381", "vine_snake"], "60": ["n01740131", "night_snake"], "61": ["n01742172", "boa_constrictor"], "62": ["n01744401", "rock_python"], "63": ["n01748264", "Indian_cobra"], "64": ["n01749939", "green_mamba"], "65": ["n01751748", "sea_snake"], "66": ["n01753488", "horned_viper"], "67": ["n01755581", "diamondback"], "68": ["n01756291", "sidewinder"], "69": ["n01768244", "trilobite"], "70": ["n01770081", "harvestman"], "71": ["n01770393", "scorpion"], "72": ["n01773157", "black_and_gold_garden_spider"], "73": ["n01773549", "barn_spider"], "74": ["n01773797", "garden_spider"], "75": ["n01774384", "black_widow"], "76": ["n01774750", "tarantula"], "77": ["n01775062", "wolf_spider"], "78": ["n01776313", "tick"], "79": ["n01784675", "centipede"], "80": ["n01795545", "black_grouse"], "81": ["n01796340", "ptarmigan"], "82": ["n01797886", "ruffed_grouse"], "83": ["n01798484", "prairie_chicken"], "84": ["n01806143", "peacock"], "85": ["n01806567", "quail"], "86": ["n01807496", "partridge"], "87": ["n01817953", "African_grey"], "88": ["n01818515", "macaw"], "89": ["n01819313", "sulphur-crested_cockatoo"], "90": ["n01820546", "lorikeet"], "91": ["n01824575", "coucal"], "92": ["n01828970", "bee_eater"], "93": ["n01829413", "hornbill"], "94": ["n01833805", "hummingbird"], "95": ["n01843065", "jacamar"], "96": ["n01843383", "toucan"], "97": ["n01847000", "drake"], "98": ["n01855032", "red-breasted_merganser"], "99": ["n01855672", "goose"], "100": ["n01860187", "black_swan"], "101": ["n01871265", "tusker"], "102": ["n01872401", "echidna"], "103": ["n01873310", "platypus"], "104": ["n01877812", "wallaby"], "105": ["n01882714", "koala"], "106": ["n01883070", "wombat"], "107": ["n01910747", "jellyfish"], "108": ["n01914609", "sea_anemone"], "109": ["n01917289", "brain_coral"], "110": ["n01924916", "flatworm"], "111": ["n01930112", "nematode"], "112": ["n01943899", "conch"], "113": ["n01944390", "snail"], "114": ["n01945685", "slug"], "115": ["n01950731", "sea_slug"], "116": ["n01955084", "chiton"], "117": ["n01968897", "chambered_nautilus"], "118": ["n01978287", "Dungeness_crab"], "119": ["n01978455", "rock_crab"], "120": ["n01980166", "fiddler_crab"], "121": ["n01981276", "king_crab"], "122": ["n01983481", "American_lobster"], "123": 
["n01984695", "spiny_lobster"], "124": ["n01985128", "crayfish"], "125": ["n01986214", "hermit_crab"], "126": ["n01990800", "isopod"], "127": ["n02002556", "white_stork"], "128": ["n02002724", "black_stork"], "129": ["n02006656", "spoonbill"], "130": ["n02007558", "flamingo"], "131": ["n02009229", "little_blue_heron"], "132": ["n02009912", "American_egret"], "133": ["n02011460", "bittern"], "134": ["n02012849", "crane"], "135": ["n02013706", "limpkin"], "136": ["n02017213", "European_gallinule"], "137": ["n02018207", "American_coot"], "138": ["n02018795", "bustard"], "139": ["n02025239", "ruddy_turnstone"], "140": ["n02027492", "red-backed_sandpiper"], "141": ["n02028035", "redshank"], "142": ["n02033041", "dowitcher"], "143": ["n02037110", "oystercatcher"], "144": ["n02051845", "pelican"], "145": ["n02056570", "king_penguin"], "146": ["n02058221", "albatross"], "147": ["n02066245", "grey_whale"], "148": ["n02071294", "killer_whale"], "149": ["n02074367", "dugong"], "150": ["n02077923", "sea_lion"], "151": ["n02085620", "Chihuahua"], "152": ["n02085782", "Japanese_spaniel"], "153": ["n02085936", "Maltese_dog"], "154": ["n02086079", "Pekinese"], "155": ["n02086240", "Shih-Tzu"], "156": ["n02086646", "Blenheim_spaniel"], "157": ["n02086910", "papillon"], "158": ["n02087046", "toy_terrier"], "159": ["n02087394", "Rhodesian_ridgeback"], "160": ["n02088094", "Afghan_hound"], "161": ["n02088238", "basset"], "162": ["n02088364", "beagle"], "163": ["n02088466", "bloodhound"], "164": ["n02088632", "bluetick"], "165": ["n02089078", "black-and-tan_coonhound"], "166": ["n02089867", "Walker_hound"], "167": ["n02089973", "English_foxhound"], "168": ["n02090379", "redbone"], "169": ["n02090622", "borzoi"], "170": ["n02090721", "Irish_wolfhound"], "171": ["n02091032", "Italian_greyhound"], "172": ["n02091134", "whippet"], "173": ["n02091244", "Ibizan_hound"], "174": ["n02091467", "Norwegian_elkhound"], "175": ["n02091635", "otterhound"], "176": ["n02091831", "Saluki"], "177": ["n02092002", "Scottish_deerhound"], "178": ["n02092339", "Weimaraner"], "179": ["n02093256", "Staffordshire_bullterrier"], "180": ["n02093428", "American_Staffordshire_terrier"], "181": ["n02093647", "Bedlington_terrier"], "182": ["n02093754", "Border_terrier"], "183": ["n02093859", "Kerry_blue_terrier"], "184": ["n02093991", "Irish_terrier"], "185": ["n02094114", "Norfolk_terrier"], "186": ["n02094258", "Norwich_terrier"], "187": ["n02094433", "Yorkshire_terrier"], "188": ["n02095314", "wire-haired_fox_terrier"], "189": ["n02095570", "Lakeland_terrier"], "190": ["n02095889", "Sealyham_terrier"], "191": ["n02096051", "Airedale"], "192": ["n02096177", "cairn"], "193": ["n02096294", "Australian_terrier"], "194": ["n02096437", "Dandie_Dinmont"], "195": ["n02096585", "Boston_bull"], "196": ["n02097047", "miniature_schnauzer"], "197": ["n02097130", "giant_schnauzer"], "198": ["n02097209", "standard_schnauzer"], "199": ["n02097298", "Scotch_terrier"], "200": ["n02097474", "Tibetan_terrier"], "201": ["n02097658", "silky_terrier"], "202": ["n02098105", "soft-coated_wheaten_terrier"], "203": ["n02098286", "West_Highland_white_terrier"], "204": ["n02098413", "Lhasa"], "205": ["n02099267", "flat-coated_retriever"], "206": ["n02099429", "curly-coated_retriever"], "207": ["n02099601", "golden_retriever"], "208": ["n02099712", "Labrador_retriever"], "209": ["n02099849", "Chesapeake_Bay_retriever"], "210": ["n02100236", "German_short-haired_pointer"], "211": ["n02100583", "vizsla"], "212": ["n02100735", "English_setter"], "213": ["n02100877", 
"Irish_setter"], "214": ["n02101006", "Gordon_setter"], "215": ["n02101388", "Brittany_spaniel"], "216": ["n02101556", "clumber"], "217": ["n02102040", "English_springer"], "218": ["n02102177", "Welsh_springer_spaniel"], "219": ["n02102318", "cocker_spaniel"], "220": ["n02102480", "Sussex_spaniel"], "221": ["n02102973", "Irish_water_spaniel"], "222": ["n02104029", "kuvasz"], "223": ["n02104365", "schipperke"], "224": ["n02105056", "groenendael"], "225": ["n02105162", "malinois"], "226": ["n02105251", "briard"], "227": ["n02105412", "kelpie"], "228": ["n02105505", "komondor"], "229": ["n02105641", "Old_English_sheepdog"], "230": ["n02105855", "Shetland_sheepdog"], "231": ["n02106030", "collie"], "232": ["n02106166", "Border_collie"], "233": ["n02106382", "Bouvier_des_Flandres"], "234": ["n02106550", "Rottweiler"], "235": ["n02106662", "German_shepherd"], "236": ["n02107142", "Doberman"], "237": ["n02107312", "miniature_pinscher"], "238": ["n02107574", "Greater_Swiss_Mountain_dog"], "239": ["n02107683", "Bernese_mountain_dog"], "240": ["n02107908", "Appenzeller"], "241": ["n02108000", "EntleBucher"], "242": ["n02108089", "boxer"], "243": ["n02108422", "bull_mastiff"], "244": ["n02108551", "Tibetan_mastiff"], "245": ["n02108915", "French_bulldog"], "246": ["n02109047", "Great_Dane"], "247": ["n02109525", "Saint_Bernard"], "248": ["n02109961", "Eskimo_dog"], "249": ["n02110063", "malamute"], "250": ["n02110185", "Siberian_husky"], "251": ["n02110341", "dalmatian"], "252": ["n02110627", "affenpinscher"], "253": ["n02110806", "basenji"], "254": ["n02110958", "pug"], "255": ["n02111129", "Leonberg"], "256": ["n02111277", "Newfoundland"], "257": ["n02111500", "Great_Pyrenees"], "258": ["n02111889", "Samoyed"], "259": ["n02112018", "Pomeranian"], "260": ["n02112137", "chow"], "261": ["n02112350", "keeshond"], "262": ["n02112706", "Brabancon_griffon"], "263": ["n02113023", "Pembroke"], "264": ["n02113186", "Cardigan"], "265": ["n02113624", "toy_poodle"], "266": ["n02113712", "miniature_poodle"], "267": ["n02113799", "standard_poodle"], "268": ["n02113978", "Mexican_hairless"], "269": ["n02114367", "timber_wolf"], "270": ["n02114548", "white_wolf"], "271": ["n02114712", "red_wolf"], "272": ["n02114855", "coyote"], "273": ["n02115641", "dingo"], "274": ["n02115913", "dhole"], "275": ["n02116738", "African_hunting_dog"], "276": ["n02117135", "hyena"], "277": ["n02119022", "red_fox"], "278": ["n02119789", "kit_fox"], "279": ["n02120079", "Arctic_fox"], "280": ["n02120505", "grey_fox"], "281": ["n02123045", "tabby"], "282": ["n02123159", "tiger_cat"], "283": ["n02123394", "Persian_cat"], "284": ["n02123597", "Siamese_cat"], "285": ["n02124075", "Egyptian_cat"], "286": ["n02125311", "cougar"], "287": ["n02127052", "lynx"], "288": ["n02128385", "leopard"], "289": ["n02128757", "snow_leopard"], "290": ["n02128925", "jaguar"], "291": ["n02129165", "lion"], "292": ["n02129604", "tiger"], "293": ["n02130308", "cheetah"], "294": ["n02132136", "brown_bear"], "295": ["n02133161", "American_black_bear"], "296": ["n02134084", "ice_bear"], "297": ["n02134418", "sloth_bear"], "298": ["n02137549", "mongoose"], "299": ["n02138441", "meerkat"], "300": ["n02165105", "tiger_beetle"], "301": ["n02165456", "ladybug"], "302": ["n02167151", "ground_beetle"], "303": ["n02168699", "long-horned_beetle"], "304": ["n02169497", "leaf_beetle"], "305": ["n02172182", "dung_beetle"], "306": ["n02174001", "rhinoceros_beetle"], "307": ["n02177972", "weevil"], "308": ["n02190166", "fly"], "309": ["n02206856", "bee"], "310": ["n02219486", 
"ant"], "311": ["n02226429", "grasshopper"], "312": ["n02229544", "cricket"], "313": ["n02231487", "walking_stick"], "314": ["n02233338", "cockroach"], "315": ["n02236044", "mantis"], "316": ["n02256656", "cicada"], "317": ["n02259212", "leafhopper"], "318": ["n02264363", "lacewing"], "319": ["n02268443", "dragonfly"], "320": ["n02268853", "damselfly"], "321": ["n02276258", "admiral"], "322": ["n02277742", "ringlet"], "323": ["n02279972", "monarch"], "324": ["n02280649", "cabbage_butterfly"], "325": ["n02281406", "sulphur_butterfly"], "326": ["n02281787", "lycaenid"], "327": ["n02317335", "starfish"], "328": ["n02319095", "sea_urchin"], "329": ["n02321529", "sea_cucumber"], "330": ["n02325366", "wood_rabbit"], "331": ["n02326432", "hare"], "332": ["n02328150", "Angora"], "333": ["n02342885", "hamster"], "334": ["n02346627", "porcupine"], "335": ["n02356798", "fox_squirrel"], "336": ["n02361337", "marmot"], "337": ["n02363005", "beaver"], "338": ["n02364673", "guinea_pig"], "339": ["n02389026", "sorrel"], "340": ["n02391049", "zebra"], "341": ["n02395406", "hog"], "342": ["n02396427", "wild_boar"], "343": ["n02397096", "warthog"], "344": ["n02398521", "hippopotamus"], "345": ["n02403003", "ox"], "346": ["n02408429", "water_buffalo"], "347": ["n02410509", "bison"], "348": ["n02412080", "ram"], "349": ["n02415577", "bighorn"], "350": ["n02417914", "ibex"], "351": ["n02422106", "hartebeest"], "352": ["n02422699", "impala"], "353": ["n02423022", "gazelle"], "354": ["n02437312", "Arabian_camel"], "355": ["n02437616", "llama"], "356": ["n02441942", "weasel"], "357": ["n02442845", "mink"], "358": ["n02443114", "polecat"], "359": ["n02443484", "black-footed_ferret"], "360": ["n02444819", "otter"], "361": ["n02445715", "skunk"], "362": ["n02447366", "badger"], "363": ["n02454379", "armadillo"], "364": ["n02457408", "three-toed_sloth"], "365": ["n02480495", "orangutan"], "366": ["n02480855", "gorilla"], "367": ["n02481823", "chimpanzee"], "368": ["n02483362", "gibbon"], "369": ["n02483708", "siamang"], "370": ["n02484975", "guenon"], "371": ["n02486261", "patas"], "372": ["n02486410", "baboon"], "373": ["n02487347", "macaque"], "374": ["n02488291", "langur"], "375": ["n02488702", "colobus"], "376": ["n02489166", "proboscis_monkey"], "377": ["n02490219", "marmoset"], "378": ["n02492035", "capuchin"], "379": ["n02492660", "howler_monkey"], "380": ["n02493509", "titi"], "381": ["n02493793", "spider_monkey"], "382": ["n02494079", "squirrel_monkey"], "383": ["n02497673", "Madagascar_cat"], "384": ["n02500267", "indri"], "385": ["n02504013", "Indian_elephant"], "386": ["n02504458", "African_elephant"], "387": ["n02509815", "lesser_panda"], "388": ["n02510455", "giant_panda"], "389": ["n02514041", "barracouta"], "390": ["n02526121", "eel"], "391": ["n02536864", "coho"], "392": ["n02606052", "rock_beauty"], "393": ["n02607072", "anemone_fish"], "394": ["n02640242", "sturgeon"], "395": ["n02641379", "gar"], "396": ["n02643566", "lionfish"], "397": ["n02655020", "puffer"], "398": ["n02666196", "abacus"], "399": ["n02667093", "abaya"], "400": ["n02669723", "academic_gown"], "401": ["n02672831", "accordion"], "402": ["n02676566", "acoustic_guitar"], "403": ["n02687172", "aircraft_carrier"], "404": ["n02690373", "airliner"], "405": ["n02692877", "airship"], "406": ["n02699494", "altar"], "407": ["n02701002", "ambulance"], "408": ["n02704792", "amphibian"], "409": ["n02708093", "analog_clock"], "410": ["n02727426", "apiary"], "411": ["n02730930", "apron"], "412": ["n02747177", "ashcan"], "413": ["n02749479", 
"assault_rifle"], "414": ["n02769748", "backpack"], "415": ["n02776631", "bakery"], "416": ["n02777292", "balance_beam"], "417": ["n02782093", "balloon"], "418": ["n02783161", "ballpoint"], "419": ["n02786058", "Band_Aid"], "420": ["n02787622", "banjo"], "421": ["n02788148", "bannister"], "422": ["n02790996", "barbell"], "423": ["n02791124", "barber_chair"], "424": ["n02791270", "barbershop"], "425": ["n02793495", "barn"], "426": ["n02794156", "barometer"], "427": ["n02795169", "barrel"], "428": ["n02797295", "barrow"], "429": ["n02799071", "baseball"], "430": ["n02802426", "basketball"], "431": ["n02804414", "bassinet"], "432": ["n02804610", "bassoon"], "433": ["n02807133", "bathing_cap"], "434": ["n02808304", "bath_towel"], "435": ["n02808440", "bathtub"], "436": ["n02814533", "beach_wagon"], "437": ["n02814860", "beacon"], "438": ["n02815834", "beaker"], "439": ["n02817516", "bearskin"], "440": ["n02823428", "beer_bottle"], "441": ["n02823750", "beer_glass"], "442": ["n02825657", "bell_cote"], "443": ["n02834397", "bib"], "444": ["n02835271", "bicycle-built-for-two"], "445": ["n02837789", "bikini"], "446": ["n02840245", "binder"], "447": ["n02841315", "binoculars"], "448": ["n02843684", "birdhouse"], "449": ["n02859443", "boathouse"], "450": ["n02860847", "bobsled"], "451": ["n02865351", "bolo_tie"], "452": ["n02869837", "bonnet"], "453": ["n02870880", "bookcase"], "454": ["n02871525", "bookshop"], "455": ["n02877765", "bottlecap"], "456": ["n02879718", "bow"], "457": ["n02883205", "bow_tie"], "458": ["n02892201", "brass"], "459": ["n02892767", "brassiere"], "460": ["n02894605", "breakwater"], "461": ["n02895154", "breastplate"], "462": ["n02906734", "broom"], "463": ["n02909870", "bucket"], "464": ["n02910353", "buckle"], "465": ["n02916936", "bulletproof_vest"], "466": ["n02917067", "bullet_train"], "467": ["n02927161", "butcher_shop"], "468": ["n02930766", "cab"], "469": ["n02939185", "caldron"], "470": ["n02948072", "candle"], "471": ["n02950826", "cannon"], "472": ["n02951358", "canoe"], "473": ["n02951585", "can_opener"], "474": ["n02963159", "cardigan"], "475": ["n02965783", "car_mirror"], "476": ["n02966193", "carousel"], "477": ["n02966687", "carpenter's_kit"], "478": ["n02971356", "carton"], "479": ["n02974003", "car_wheel"], "480": ["n02977058", "cash_machine"], "481": ["n02978881", "cassette"], "482": ["n02979186", "cassette_player"], "483": ["n02980441", "castle"], "484": ["n02981792", "catamaran"], "485": ["n02988304", "CD_player"], "486": ["n02992211", "cello"], "487": ["n02992529", "cellular_telephone"], "488": ["n02999410", "chain"], "489": ["n03000134", "chainlink_fence"], "490": ["n03000247", "chain_mail"], "491": ["n03000684", "chain_saw"], "492": ["n03014705", "chest"], "493": ["n03016953", "chiffonier"], "494": ["n03017168", "chime"], "495": ["n03018349", "china_cabinet"], "496": ["n03026506", "Christmas_stocking"], "497": ["n03028079", "church"], "498": ["n03032252", "cinema"], "499": ["n03041632", "cleaver"], "500": ["n03042490", "cliff_dwelling"], "501": ["n03045698", "cloak"], "502": ["n03047690", "clog"], "503": ["n03062245", "cocktail_shaker"], "504": ["n03063599", "coffee_mug"], "505": ["n03063689", "coffeepot"], "506": ["n03065424", "coil"], "507": ["n03075370", "combination_lock"], "508": ["n03085013", "computer_keyboard"], "509": ["n03089624", "confectionery"], "510": ["n03095699", "container_ship"], "511": ["n03100240", "convertible"], "512": ["n03109150", "corkscrew"], "513": ["n03110669", "cornet"], "514": ["n03124043", "cowboy_boot"], "515": 
["n03124170", "cowboy_hat"], "516": ["n03125729", "cradle"], "517": ["n03126707", "crane"], "518": ["n03127747", "crash_helmet"], "519": ["n03127925", "crate"], "520": ["n03131574", "crib"], "521": ["n03133878", "Crock_Pot"], "522": ["n03134739", "croquet_ball"], "523": ["n03141823", "crutch"], "524": ["n03146219", "cuirass"], "525": ["n03160309", "dam"], "526": ["n03179701", "desk"], "527": ["n03180011", "desktop_computer"], "528": ["n03187595", "dial_telephone"], "529": ["n03188531", "diaper"], "530": ["n03196217", "digital_clock"], "531": ["n03197337", "digital_watch"], "532": ["n03201208", "dining_table"], "533": ["n03207743", "dishrag"], "534": ["n03207941", "dishwasher"], "535": ["n03208938", "disk_brake"], "536": ["n03216828", "dock"], "537": ["n03218198", "dogsled"], "538": ["n03220513", "dome"], "539": ["n03223299", "doormat"], "540": ["n03240683", "drilling_platform"], "541": ["n03249569", "drum"], "542": ["n03250847", "drumstick"], "543": ["n03255030", "dumbbell"], "544": ["n03259280", "Dutch_oven"], "545": ["n03271574", "electric_fan"], "546": ["n03272010", "electric_guitar"], "547": ["n03272562", "electric_locomotive"], "548": ["n03290653", "entertainment_center"], "549": ["n03291819", "envelope"], "550": ["n03297495", "espresso_maker"], "551": ["n03314780", "face_powder"], "552": ["n03325584", "feather_boa"], "553": ["n03337140", "file"], "554": ["n03344393", "fireboat"], "555": ["n03345487", "fire_engine"], "556": ["n03347037", "fire_screen"], "557": ["n03355925", "flagpole"], "558": ["n03372029", "flute"], "559": ["n03376595", "folding_chair"], "560": ["n03379051", "football_helmet"], "561": ["n03384352", "forklift"], "562": ["n03388043", "fountain"], "563": ["n03388183", "fountain_pen"], "564": ["n03388549", "four-poster"], "565": ["n03393912", "freight_car"], "566": ["n03394916", "French_horn"], "567": ["n03400231", "frying_pan"], "568": ["n03404251", "fur_coat"], "569": ["n03417042", "garbage_truck"], "570": ["n03424325", "gasmask"], "571": ["n03425413", "gas_pump"], "572": ["n03443371", "goblet"], "573": ["n03444034", "go-kart"], "574": ["n03445777", "golf_ball"], "575": ["n03445924", "golfcart"], "576": ["n03447447", "gondola"], "577": ["n03447721", "gong"], "578": ["n03450230", "gown"], "579": ["n03452741", "grand_piano"], "580": ["n03457902", "greenhouse"], "581": ["n03459775", "grille"], "582": ["n03461385", "grocery_store"], "583": ["n03467068", "guillotine"], "584": ["n03476684", "hair_slide"], "585": ["n03476991", "hair_spray"], "586": ["n03478589", "half_track"], "587": ["n03481172", "hammer"], "588": ["n03482405", "hamper"], "589": ["n03483316", "hand_blower"], "590": ["n03485407", "hand-held_computer"], "591": ["n03485794", "handkerchief"], "592": ["n03492542", "hard_disc"], "593": ["n03494278", "harmonica"], "594": ["n03495258", "harp"], "595": ["n03496892", "harvester"], "596": ["n03498962", "hatchet"], "597": ["n03527444", "holster"], "598": ["n03529860", "home_theater"], "599": ["n03530642", "honeycomb"], "600": ["n03532672", "hook"], "601": ["n03534580", "hoopskirt"], "602": ["n03535780", "horizontal_bar"], "603": ["n03538406", "horse_cart"], "604": ["n03544143", "hourglass"], "605": ["n03584254", "iPod"], "606": ["n03584829", "iron"], "607": ["n03590841", "jack-o'-lantern"], "608": ["n03594734", "jean"], "609": ["n03594945", "jeep"], "610": ["n03595614", "jersey"], "611": ["n03598930", "jigsaw_puzzle"], "612": ["n03599486", "jinrikisha"], "613": ["n03602883", "joystick"], "614": ["n03617480", "kimono"], "615": ["n03623198", "knee_pad"], "616": 
["n03627232", "knot"], "617": ["n03630383", "lab_coat"], "618": ["n03633091", "ladle"], "619": ["n03637318", "lampshade"], "620": ["n03642806", "laptop"], "621": ["n03649909", "lawn_mower"], "622": ["n03657121", "lens_cap"], "623": ["n03658185", "letter_opener"], "624": ["n03661043", "library"], "625": ["n03662601", "lifeboat"], "626": ["n03666591", "lighter"], "627": ["n03670208", "limousine"], "628": ["n03673027", "liner"], "629": ["n03676483", "lipstick"], "630": ["n03680355", "Loafer"], "631": ["n03690938", "lotion"], "632": ["n03691459", "loudspeaker"], "633": ["n03692522", "loupe"], "634": ["n03697007", "lumbermill"], "635": ["n03706229", "magnetic_compass"], "636": ["n03709823", "mailbag"], "637": ["n03710193", "mailbox"], "638": ["n03710637", "maillot"], "639": ["n03710721", "maillot"], "640": ["n03717622", "manhole_cover"], "641": ["n03720891", "maraca"], "642": ["n03721384", "marimba"], "643": ["n03724870", "mask"], "644": ["n03729826", "matchstick"], "645": ["n03733131", "maypole"], "646": ["n03733281", "maze"], "647": ["n03733805", "measuring_cup"], "648": ["n03742115", "medicine_chest"], "649": ["n03743016", "megalith"], "650": ["n03759954", "microphone"], "651": ["n03761084", "microwave"], "652": ["n03763968", "military_uniform"], "653": ["n03764736", "milk_can"], "654": ["n03769881", "minibus"], "655": ["n03770439", "miniskirt"], "656": ["n03770679", "minivan"], "657": ["n03773504", "missile"], "658": ["n03775071", "mitten"], "659": ["n03775546", "mixing_bowl"], "660": ["n03776460", "mobile_home"], "661": ["n03777568", "Model_T"], "662": ["n03777754", "modem"], "663": ["n03781244", "monastery"], "664": ["n03782006", "monitor"], "665": ["n03785016", "moped"], "666": ["n03786901", "mortar"], "667": ["n03787032", "mortarboard"], "668": ["n03788195", "mosque"], "669": ["n03788365", "mosquito_net"], "670": ["n03791053", "motor_scooter"], "671": ["n03792782", "mountain_bike"], "672": ["n03792972", "mountain_tent"], "673": ["n03793489", "mouse"], "674": ["n03794056", "mousetrap"], "675": ["n03796401", "moving_van"], "676": ["n03803284", "muzzle"], "677": ["n03804744", "nail"], "678": ["n03814639", "neck_brace"], "679": ["n03814906", "necklace"], "680": ["n03825788", "nipple"], "681": ["n03832673", "notebook"], "682": ["n03837869", "obelisk"], "683": ["n03838899", "oboe"], "684": ["n03840681", "ocarina"], "685": ["n03841143", "odometer"], "686": ["n03843555", "oil_filter"], "687": ["n03854065", "organ"], "688": ["n03857828", "oscilloscope"], "689": ["n03866082", "overskirt"], "690": ["n03868242", "oxcart"], "691": ["n03868863", "oxygen_mask"], "692": ["n03871628", "packet"], "693": ["n03873416", "paddle"], "694": ["n03874293", "paddlewheel"], "695": ["n03874599", "padlock"], "696": ["n03876231", "paintbrush"], "697": ["n03877472", "pajama"], "698": ["n03877845", "palace"], "699": ["n03884397", "panpipe"], "700": ["n03887697", "paper_towel"], "701": ["n03888257", "parachute"], "702": ["n03888605", "parallel_bars"], "703": ["n03891251", "park_bench"], "704": ["n03891332", "parking_meter"], "705": ["n03895866", "passenger_car"], "706": ["n03899768", "patio"], "707": ["n03902125", "pay-phone"], "708": ["n03903868", "pedestal"], "709": ["n03908618", "pencil_box"], "710": ["n03908714", "pencil_sharpener"], "711": ["n03916031", "perfume"], "712": ["n03920288", "Petri_dish"], "713": ["n03924679", "photocopier"], "714": ["n03929660", "pick"], "715": ["n03929855", "pickelhaube"], "716": ["n03930313", "picket_fence"], "717": ["n03930630", "pickup"], "718": ["n03933933", "pier"], "719": 
["n03935335", "piggy_bank"], "720": ["n03937543", "pill_bottle"], "721": ["n03938244", "pillow"], "722": ["n03942813", "ping-pong_ball"], "723": ["n03944341", "pinwheel"], "724": ["n03947888", "pirate"], "725": ["n03950228", "pitcher"], "726": ["n03954731", "plane"], "727": ["n03956157", "planetarium"], "728": ["n03958227", "plastic_bag"], "729": ["n03961711", "plate_rack"], "730": ["n03967562", "plow"], "731": ["n03970156", "plunger"], "732": ["n03976467", "Polaroid_camera"], "733": ["n03976657", "pole"], "734": ["n03977966", "police_van"], "735": ["n03980874", "poncho"], "736": ["n03982430", "pool_table"], "737": ["n03983396", "pop_bottle"], "738": ["n03991062", "pot"], "739": ["n03992509", "potter's_wheel"], "740": ["n03995372", "power_drill"], "741": ["n03998194", "prayer_rug"], "742": ["n04004767", "printer"], "743": ["n04005630", "prison"], "744": ["n04008634", "projectile"], "745": ["n04009552", "projector"], "746": ["n04019541", "puck"], "747": ["n04023962", "punching_bag"], "748": ["n04026417", "purse"], "749": ["n04033901", "quill"], "750": ["n04033995", "quilt"], "751": ["n04037443", "racer"], "752": ["n04039381", "racket"], "753": ["n04040759", "radiator"], "754": ["n04041544", "radio"], "755": ["n04044716", "radio_telescope"], "756": ["n04049303", "rain_barrel"], "757": ["n04065272", "recreational_vehicle"], "758": ["n04067472", "reel"], "759": ["n04069434", "reflex_camera"], "760": ["n04070727", "refrigerator"], "761": ["n04074963", "remote_control"], "762": ["n04081281", "restaurant"], "763": ["n04086273", "revolver"], "764": ["n04090263", "rifle"], "765": ["n04099969", "rocking_chair"], "766": ["n04111531", "rotisserie"], "767": ["n04116512", "rubber_eraser"], "768": ["n04118538", "rugby_ball"], "769": ["n04118776", "rule"], "770": ["n04120489", "running_shoe"], "771": ["n04125021", "safe"], "772": ["n04127249", "safety_pin"], "773": ["n04131690", "saltshaker"], "774": ["n04133789", "sandal"], "775": ["n04136333", "sarong"], "776": ["n04141076", "sax"], "777": ["n04141327", "scabbard"], "778": ["n04141975", "scale"], "779": ["n04146614", "school_bus"], "780": ["n04147183", "schooner"], "781": ["n04149813", "scoreboard"], "782": ["n04152593", "screen"], "783": ["n04153751", "screw"], "784": ["n04154565", "screwdriver"], "785": ["n04162706", "seat_belt"], "786": ["n04179913", "sewing_machine"], "787": ["n04192698", "shield"], "788": ["n04200800", "shoe_shop"], "789": ["n04201297", "shoji"], "790": ["n04204238", "shopping_basket"], "791": ["n04204347", "shopping_cart"], "792": ["n04208210", "shovel"], "793": ["n04209133", "shower_cap"], "794": ["n04209239", "shower_curtain"], "795": ["n04228054", "ski"], "796": ["n04229816", "ski_mask"], "797": ["n04235860", "sleeping_bag"], "798": ["n04238763", "slide_rule"], "799": ["n04239074", "sliding_door"], "800": ["n04243546", "slot"], "801": ["n04251144", "snorkel"], "802": ["n04252077", "snowmobile"], "803": ["n04252225", "snowplow"], "804": ["n04254120", "soap_dispenser"], "805": ["n04254680", "soccer_ball"], "806": ["n04254777", "sock"], "807": ["n04258138", "solar_dish"], "808": ["n04259630", "sombrero"], "809": ["n04263257", "soup_bowl"], "810": ["n04264628", "space_bar"], "811": ["n04265275", "space_heater"], "812": ["n04266014", "space_shuttle"], "813": ["n04270147", "spatula"], "814": ["n04273569", "speedboat"], "815": ["n04275548", "spider_web"], "816": ["n04277352", "spindle"], "817": ["n04285008", "sports_car"], "818": ["n04286575", "spotlight"], "819": ["n04296562", "stage"], "820": ["n04310018", "steam_locomotive"], 
"821": ["n04311004", "steel_arch_bridge"], "822": ["n04311174", "steel_drum"], "823": ["n04317175", "stethoscope"], "824": ["n04325704", "stole"], "825": ["n04326547", "stone_wall"], "826": ["n04328186", "stopwatch"], "827": ["n04330267", "stove"], "828": ["n04332243", "strainer"], "829": ["n04335435", "streetcar"], "830": ["n04336792", "stretcher"], "831": ["n04344873", "studio_couch"], "832": ["n04346328", "stupa"], "833": ["n04347754", "submarine"], "834": ["n04350905", "suit"], "835": ["n04355338", "sundial"], "836": ["n04355933", "sunglass"], "837": ["n04356056", "sunglasses"], "838": ["n04357314", "sunscreen"], "839": ["n04366367", "suspension_bridge"], "840": ["n04367480", "swab"], "841": ["n04370456", "sweatshirt"], "842": ["n04371430", "swimming_trunks"], "843": ["n04371774", "swing"], "844": ["n04372370", "switch"], "845": ["n04376876", "syringe"], "846": ["n04380533", "table_lamp"], "847": ["n04389033", "tank"], "848": ["n04392985", "tape_player"], "849": ["n04398044", "teapot"], "850": ["n04399382", "teddy"], "851": ["n04404412", "television"], "852": ["n04409515", "tennis_ball"], "853": ["n04417672", "thatch"], "854": ["n04418357", "theater_curtain"], "855": ["n04423845", "thimble"], "856": ["n04428191", "thresher"], "857": ["n04429376", "throne"], "858": ["n04435653", "tile_roof"], "859": ["n04442312", "toaster"], "860": ["n04443257", "tobacco_shop"], "861": ["n04447861", "toilet_seat"], "862": ["n04456115", "torch"], "863": ["n04458633", "totem_pole"], "864": ["n04461696", "tow_truck"], "865": ["n04462240", "toyshop"], "866": ["n04465501", "tractor"], "867": ["n04467665", "trailer_truck"], "868": ["n04476259", "tray"], "869": ["n04479046", "trench_coat"], "870": ["n04482393", "tricycle"], "871": ["n04483307", "trimaran"], "872": ["n04485082", "tripod"], "873": ["n04486054", "triumphal_arch"], "874": ["n04487081", "trolleybus"], "875": ["n04487394", "trombone"], "876": ["n04493381", "tub"], "877": ["n04501370", "turnstile"], "878": ["n04505470", "typewriter_keyboard"], "879": ["n04507155", "umbrella"], "880": ["n04509417", "unicycle"], "881": ["n04515003", "upright"], "882": ["n04517823", "vacuum"], "883": ["n04522168", "vase"], "884": ["n04523525", "vault"], "885": ["n04525038", "velvet"], "886": ["n04525305", "vending_machine"], "887": ["n04532106", "vestment"], "888": ["n04532670", "viaduct"], "889": ["n04536866", "violin"], "890": ["n04540053", "volleyball"], "891": ["n04542943", "waffle_iron"], "892": ["n04548280", "wall_clock"], "893": ["n04548362", "wallet"], "894": ["n04550184", "wardrobe"], "895": ["n04552348", "warplane"], "896": ["n04553703", "washbasin"], "897": ["n04554684", "washer"], "898": ["n04557648", "water_bottle"], "899": ["n04560804", "water_jug"], "900": ["n04562935", "water_tower"], "901": ["n04579145", "whiskey_jug"], "902": ["n04579432", "whistle"], "903": ["n04584207", "wig"], "904": ["n04589890", "window_screen"], "905": ["n04590129", "window_shade"], "906": ["n04591157", "Windsor_tie"], "907": ["n04591713", "wine_bottle"], "908": ["n04592741", "wing"], "909": ["n04596742", "wok"], "910": ["n04597913", "wooden_spoon"], "911": ["n04599235", "wool"], "912": ["n04604644", "worm_fence"], "913": ["n04606251", "wreck"], "914": ["n04612504", "yawl"], "915": ["n04613696", "yurt"], "916": ["n06359193", "web_site"], "917": ["n06596364", "comic_book"], "918": ["n06785654", "crossword_puzzle"], "919": ["n06794110", "street_sign"], "920": ["n06874185", "traffic_light"], "921": ["n07248320", "book_jacket"], "922": ["n07565083", "menu"], "923": ["n07579787", 
"plate"], "924": ["n07583066", "guacamole"], "925": ["n07584110", "consomme"], "926": ["n07590611", "hot_pot"], "927": ["n07613480", "trifle"], "928": ["n07614500", "ice_cream"], "929": ["n07615774", "ice_lolly"], "930": ["n07684084", "French_loaf"], "931": ["n07693725", "bagel"], "932": ["n07695742", "pretzel"], "933": ["n07697313", "cheeseburger"], "934": ["n07697537", "hotdog"], "935": ["n07711569", "mashed_potato"], "936": ["n07714571", "head_cabbage"], "937": ["n07714990", "broccoli"], "938": ["n07715103", "cauliflower"], "939": ["n07716358", "zucchini"], "940": ["n07716906", "spaghetti_squash"], "941": ["n07717410", "acorn_squash"], "942": ["n07717556", "butternut_squash"], "943": ["n07718472", "cucumber"], "944": ["n07718747", "artichoke"], "945": ["n07720875", "bell_pepper"], "946": ["n07730033", "cardoon"], "947": ["n07734744", "mushroom"], "948": ["n07742313", "Granny_Smith"], "949": ["n07745940", "strawberry"], "950": ["n07747607", "orange"], "951": ["n07749582", "lemon"], "952": ["n07753113", "fig"], "953": ["n07753275", "pineapple"], "954": ["n07753592", "banana"], "955": ["n07754684", "jackfruit"], "956": ["n07760859", "custard_apple"], "957": ["n07768694", "pomegranate"], "958": ["n07802026", "hay"], "959": ["n07831146", "carbonara"], "960": ["n07836838", "chocolate_sauce"], "961": ["n07860988", "dough"], "962": ["n07871810", "meat_loaf"], "963": ["n07873807", "pizza"], "964": ["n07875152", "potpie"], "965": ["n07880968", "burrito"], "966": ["n07892512", "red_wine"], "967": ["n07920052", "espresso"], "968": ["n07930864", "cup"], "969": ["n07932039", "eggnog"], "970": ["n09193705", "alp"], "971": ["n09229709", "bubble"], "972": ["n09246464", "cliff"], "973": ["n09256479", "coral_reef"], "974": ["n09288635", "geyser"], "975": ["n09332890", "lakeside"], "976": ["n09399592", "promontory"], "977": ["n09421951", "sandbar"], "978": ["n09428293", "seashore"], "979": ["n09468604", "valley"], "980": ["n09472597", "volcano"], "981": ["n09835506", "ballplayer"], "982": ["n10148035", "groom"], "983": ["n10565667", "scuba_diver"], "984": ["n11879895", "rapeseed"], "985": ["n11939491", "daisy"], "986": ["n12057211", "yellow_lady's_slipper"], "987": ["n12144580", "corn"], "988": ["n12267677", "acorn"], "989": ["n12620546", "hip"], "990": ["n12768682", "buckeye"], "991": ["n12985857", "coral_fungus"], "992": ["n12998815", "agaric"], "993": ["n13037406", "gyromitra"], "994": ["n13040303", "stinkhorn"], "995": ["n13044778", "earthstar"], "996": ["n13052670", "hen-of-the-woods"], "997": ["n13054560", "bolete"], "998": ["n13133613", "ear"], "999": ["n15075141", "toilet_tissue"]} \ No newline at end of file diff --git a/tensorlayer/models/imagenet_classes.py b/tensorlayer/models/imagenet_classes.py deleted file mode 100644 index d13cfda4a..000000000 --- a/tensorlayer/models/imagenet_classes.py +++ /dev/null @@ -1,1003 +0,0 @@ -#! 
/usr/bin/python -# -*- coding: utf-8 -*- - -class_names = '''tench, Tinca tinca -goldfish, Carassius auratus -great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias -tiger shark, Galeocerdo cuvieri -hammerhead, hammerhead shark -electric ray, crampfish, numbfish, torpedo -stingray -cock -hen -ostrich, Struthio camelus -brambling, Fringilla montifringilla -goldfinch, Carduelis carduelis -house finch, linnet, Carpodacus mexicanus -junco, snowbird -indigo bunting, indigo finch, indigo bird, Passerina cyanea -robin, American robin, Turdus migratorius -bulbul -jay -magpie -chickadee -water ouzel, dipper -kite -bald eagle, American eagle, Haliaeetus leucocephalus -vulture -great grey owl, great gray owl, Strix nebulosa -European fire salamander, Salamandra salamandra -common newt, Triturus vulgaris -eft -spotted salamander, Ambystoma maculatum -axolotl, mud puppy, Ambystoma mexicanum -bullfrog, Rana catesbeiana -tree frog, tree-frog -tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui -loggerhead, loggerhead turtle, Caretta caretta -leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea -mud turtle -terrapin -box turtle, box tortoise -banded gecko -common iguana, iguana, Iguana iguana -American chameleon, anole, Anolis carolinensis -whiptail, whiptail lizard -agama -frilled lizard, Chlamydosaurus kingi -alligator lizard -Gila monster, Heloderma suspectum -green lizard, Lacerta viridis -African chameleon, Chamaeleo chamaeleon -Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis -African crocodile, Nile crocodile, Crocodylus niloticus -American alligator, Alligator mississipiensis -triceratops -thunder snake, worm snake, Carphophis amoenus -ringneck snake, ring-necked snake, ring snake -hognose snake, puff adder, sand viper -green snake, grass snake -king snake, kingsnake -garter snake, grass snake -water snake -vine snake -night snake, Hypsiglena torquata -boa constrictor, Constrictor constrictor -rock python, rock snake, Python sebae -Indian cobra, Naja naja -green mamba -sea snake -horned viper, cerastes, sand viper, horned asp, Cerastes cornutus -diamondback, diamondback rattlesnake, Crotalus adamanteus -sidewinder, horned rattlesnake, Crotalus cerastes -trilobite -harvestman, daddy longlegs, Phalangium opilio -scorpion -black and gold garden spider, Argiope aurantia -barn spider, Araneus cavaticus -garden spider, Aranea diademata -black widow, Latrodectus mactans -tarantula -wolf spider, hunting spider -tick -centipede -black grouse -ptarmigan -ruffed grouse, partridge, Bonasa umbellus -prairie chicken, prairie grouse, prairie fowl -peacock -quail -partridge -African grey, African gray, Psittacus erithacus -macaw -sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita -lorikeet -coucal -bee eater -hornbill -hummingbird -jacamar -toucan -drake -red-breasted merganser, Mergus serrator -goose -black swan, Cygnus atratus -tusker -echidna, spiny anteater, anteater -platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus -wallaby, brush kangaroo -koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus -wombat -jellyfish -sea anemone, anemone -brain coral -flatworm, platyhelminth -nematode, nematode worm, roundworm -conch -snail -slug -sea slug, nudibranch -chiton, coat-of-mail shell, sea cradle, polyplacophore -chambered nautilus, pearly nautilus, nautilus -Dungeness crab, Cancer magister -rock crab, Cancer irroratus -fiddler crab -king crab, Alaska crab, Alaskan 
king crab, Alaska king crab, Paralithodes camtschatica -American lobster, Northern lobster, Maine lobster, Homarus americanus -spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish -crayfish, crawfish, crawdad, crawdaddy -hermit crab -isopod -white stork, Ciconia ciconia -black stork, Ciconia nigra -spoonbill -flamingo -little blue heron, Egretta caerulea -American egret, great white heron, Egretta albus -bittern -crane -limpkin, Aramus pictus -European gallinule, Porphyrio porphyrio -American coot, marsh hen, mud hen, water hen, Fulica americana -bustard -ruddy turnstone, Arenaria interpres -red-backed sandpiper, dunlin, Erolia alpina -redshank, Tringa totanus -dowitcher -oystercatcher, oyster catcher -pelican -king penguin, Aptenodytes patagonica -albatross, mollymawk -grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus -killer whale, killer, orca, grampus, sea wolf, Orcinus orca -dugong, Dugong dugon -sea lion -Chihuahua -Japanese spaniel -Maltese dog, Maltese terrier, Maltese -Pekinese, Pekingese, Peke -Shih-Tzu -Blenheim spaniel -papillon -toy terrier -Rhodesian ridgeback -Afghan hound, Afghan -basset, basset hound -beagle -bloodhound, sleuthhound -bluetick -black-and-tan coonhound -Walker hound, Walker foxhound -English foxhound -redbone -borzoi, Russian wolfhound -Irish wolfhound -Italian greyhound -whippet -Ibizan hound, Ibizan Podenco -Norwegian elkhound, elkhound -otterhound, otter hound -Saluki, gazelle hound -Scottish deerhound, deerhound -Weimaraner -Staffordshire bullterrier, Staffordshire bull terrier -American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier -Bedlington terrier -Border terrier -Kerry blue terrier -Irish terrier -Norfolk terrier -Norwich terrier -Yorkshire terrier -wire-haired fox terrier -Lakeland terrier -Sealyham terrier, Sealyham -Airedale, Airedale terrier -cairn, cairn terrier -Australian terrier -Dandie Dinmont, Dandie Dinmont terrier -Boston bull, Boston terrier -miniature schnauzer -giant schnauzer -standard schnauzer -Scotch terrier, Scottish terrier, Scottie -Tibetan terrier, chrysanthemum dog -silky terrier, Sydney silky -soft-coated wheaten terrier -West Highland white terrier -Lhasa, Lhasa apso -flat-coated retriever -curly-coated retriever -golden retriever -Labrador retriever -Chesapeake Bay retriever -German short-haired pointer -vizsla, Hungarian pointer -English setter -Irish setter, red setter -Gordon setter -Brittany spaniel -clumber, clumber spaniel -English springer, English springer spaniel -Welsh springer spaniel -cocker spaniel, English cocker spaniel, cocker -Sussex spaniel -Irish water spaniel -kuvasz -schipperke -groenendael -malinois -briard -kelpie -komondor -Old English sheepdog, bobtail -Shetland sheepdog, Shetland sheep dog, Shetland -collie -Border collie -Bouvier des Flandres, Bouviers des Flandres -Rottweiler -German shepherd, German shepherd dog, German police dog, alsatian -Doberman, Doberman pinscher -miniature pinscher -Greater Swiss Mountain dog -Bernese mountain dog -Appenzeller -EntleBucher -boxer -bull mastiff -Tibetan mastiff -French bulldog -Great Dane -Saint Bernard, St Bernard -Eskimo dog, husky -malamute, malemute, Alaskan malamute -Siberian husky -dalmatian, coach dog, carriage dog -affenpinscher, monkey pinscher, monkey dog -basenji -pug, pug-dog -Leonberg -Newfoundland, Newfoundland dog -Great Pyrenees -Samoyed, Samoyede -Pomeranian -chow, chow chow -keeshond -Brabancon griffon -Pembroke, Pembroke Welsh corgi -Cardigan, 
Cardigan Welsh corgi -toy poodle -miniature poodle -standard poodle -Mexican hairless -timber wolf, grey wolf, gray wolf, Canis lupus -white wolf, Arctic wolf, Canis lupus tundrarum -red wolf, maned wolf, Canis rufus, Canis niger -coyote, prairie wolf, brush wolf, Canis latrans -dingo, warrigal, warragal, Canis dingo -dhole, Cuon alpinus -African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus -hyena, hyaena -red fox, Vulpes vulpes -kit fox, Vulpes macrotis -Arctic fox, white fox, Alopex lagopus -grey fox, gray fox, Urocyon cinereoargenteus -tabby, tabby cat -tiger cat -Persian cat -Siamese cat, Siamese -Egyptian cat -cougar, puma, catamount, mountain lion, painter, panther, Felis concolor -lynx, catamount -leopard, Panthera pardus -snow leopard, ounce, Panthera uncia -jaguar, panther, Panthera onca, Felis onca -lion, king of beasts, Panthera leo -tiger, Panthera tigris -cheetah, chetah, Acinonyx jubatus -brown bear, bruin, Ursus arctos -American black bear, black bear, Ursus americanus, Euarctos americanus -ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus -sloth bear, Melursus ursinus, Ursus ursinus -mongoose -meerkat, mierkat -tiger beetle -ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle -ground beetle, carabid beetle -long-horned beetle, longicorn, longicorn beetle -leaf beetle, chrysomelid -dung beetle -rhinoceros beetle -weevil -fly -bee -ant, emmet, pismire -grasshopper, hopper -cricket -walking stick, walkingstick, stick insect -cockroach, roach -mantis, mantid -cicada, cicala -leafhopper -lacewing, lacewing fly -dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk -damselfly -admiral -ringlet, ringlet butterfly -monarch, monarch butterfly, milkweed butterfly, Danaus plexippus -cabbage butterfly -sulphur butterfly, sulfur butterfly -lycaenid, lycaenid butterfly -starfish, sea star -sea urchin -sea cucumber, holothurian -wood rabbit, cottontail, cottontail rabbit -hare -Angora, Angora rabbit -hamster -porcupine, hedgehog -fox squirrel, eastern fox squirrel, Sciurus niger -marmot -beaver -guinea pig, Cavia cobaya -sorrel -zebra -hog, pig, grunter, squealer, Sus scrofa -wild boar, boar, Sus scrofa -warthog -hippopotamus, hippo, river horse, Hippopotamus amphibius -ox -water buffalo, water ox, Asiatic buffalo, Bubalus bubalis -bison -ram, tup -bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis -ibex, Capra ibex -hartebeest -impala, Aepyceros melampus -gazelle -Arabian camel, dromedary, Camelus dromedarius -llama -weasel -mink -polecat, fitch, foulmart, foumart, Mustela putorius -black-footed ferret, ferret, Mustela nigripes -otter -skunk, polecat, wood pussy -badger -armadillo -three-toed sloth, ai, Bradypus tridactylus -orangutan, orang, orangutang, Pongo pygmaeus -gorilla, Gorilla gorilla -chimpanzee, chimp, Pan troglodytes -gibbon, Hylobates lar -siamang, Hylobates syndactylus, Symphalangus syndactylus -guenon, guenon monkey -patas, hussar monkey, Erythrocebus patas -baboon -macaque -langur -colobus, colobus monkey -proboscis monkey, Nasalis larvatus -marmoset -capuchin, ringtail, Cebus capucinus -howler monkey, howler -titi, titi monkey -spider monkey, Ateles geoffroyi -squirrel monkey, Saimiri sciureus -Madagascar cat, ring-tailed lemur, Lemur catta -indri, indris, Indri indri, Indri brevicaudatus -Indian elephant, Elephas maximus -African elephant, Loxodonta africana -lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens 
-giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca -barracouta, snoek -eel -coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch -rock beauty, Holocanthus tricolor -anemone fish -sturgeon -gar, garfish, garpike, billfish, Lepisosteus osseus -lionfish -puffer, pufferfish, blowfish, globefish -abacus -abaya -academic gown, academic robe, judge's robe -accordion, piano accordion, squeeze box -acoustic guitar -aircraft carrier, carrier, flattop, attack aircraft carrier -airliner -airship, dirigible -altar -ambulance -amphibian, amphibious vehicle -analog clock -apiary, bee house -apron -ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin -assault rifle, assault gun -backpack, back pack, knapsack, packsack, rucksack, haversack -bakery, bakeshop, bakehouse -balance beam, beam -balloon -ballpoint, ballpoint pen, ballpen, Biro -Band Aid -banjo -bannister, banister, balustrade, balusters, handrail -barbell -barber chair -barbershop -barn -barometer -barrel, cask -barrow, garden cart, lawn cart, wheelbarrow -baseball -basketball -bassinet -bassoon -bathing cap, swimming cap -bath towel -bathtub, bathing tub, bath, tub -beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon -beacon, lighthouse, beacon light, pharos -beaker -bearskin, busby, shako -beer bottle -beer glass -bell cote, bell cot -bib -bicycle-built-for-two, tandem bicycle, tandem -bikini, two-piece -binder, ring-binder -binoculars, field glasses, opera glasses -birdhouse -boathouse -bobsled, bobsleigh, bob -bolo tie, bolo, bola tie, bola -bonnet, poke bonnet -bookcase -bookshop, bookstore, bookstall -bottlecap -bow -bow tie, bow-tie, bowtie -brass, memorial tablet, plaque -brassiere, bra, bandeau -breakwater, groin, groyne, mole, bulwark, seawall, jetty -breastplate, aegis, egis -broom -bucket, pail -buckle -bulletproof vest -bullet train, bullet -butcher shop, meat market -cab, hack, taxi, taxicab -caldron, cauldron -candle, taper, wax light -cannon -canoe -can opener, tin opener -cardigan -car mirror -carousel, carrousel, merry-go-round, roundabout, whirligig -carpenter's kit, tool kit -carton -car wheel -cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM -cassette -cassette player -castle -catamaran -CD player -cello, violoncello -cellular telephone, cellular phone, cellphone, cell, mobile phone -chain -chainlink fence -chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour -chain saw, chainsaw -chest -chiffonier, commode -chime, bell, gong -china cabinet, china closet -Christmas stocking -church, church building -cinema, movie theater, movie theatre, movie house, picture palace -cleaver, meat cleaver, chopper -cliff dwelling -cloak -clog, geta, patten, sabot -cocktail shaker -coffee mug -coffeepot -coil, spiral, volute, whorl, helix -combination lock -computer keyboard, keypad -confectionery, confectionary, candy store -container ship, containership, container vessel -convertible -corkscrew, bottle screw -cornet, horn, trumpet, trump -cowboy boot -cowboy hat, ten-gallon hat -cradle -crane -crash helmet -crate -crib, cot -Crock Pot -croquet ball -crutch -cuirass -dam, dike, dyke -desk -desktop computer -dial telephone, dial phone -diaper, nappy, napkin -digital clock -digital watch -dining table, board -dishrag, dishcloth -dishwasher, dish washer, dishwashing machine -disk brake, disc brake -dock, dockage, docking facility 
-dogsled, dog sled, dog sleigh -dome -doormat, welcome mat -drilling platform, offshore rig -drum, membranophone, tympan -drumstick -dumbbell -Dutch oven -electric fan, blower -electric guitar -electric locomotive -entertainment center -envelope -espresso maker -face powder -feather boa, boa -file, file cabinet, filing cabinet -fireboat -fire engine, fire truck -fire screen, fireguard -flagpole, flagstaff -flute, transverse flute -folding chair -football helmet -forklift -fountain -fountain pen -four-poster -freight car -French horn, horn -frying pan, frypan, skillet -fur coat -garbage truck, dustcart -gasmask, respirator, gas helmet -gas pump, gasoline pump, petrol pump, island dispenser -goblet -go-kart -golf ball -golfcart, golf cart -gondola -gong, tam-tam -gown -grand piano, grand -greenhouse, nursery, glasshouse -grille, radiator grille -grocery store, grocery, food market, market -guillotine -hair slide -hair spray -half track -hammer -hamper -hand blower, blow dryer, blow drier, hair dryer, hair drier -hand-held computer, hand-held microcomputer -handkerchief, hankie, hanky, hankey -hard disc, hard disk, fixed disk -harmonica, mouth organ, harp, mouth harp -harp -harvester, reaper -hatchet -holster -home theater, home theatre -honeycomb -hook, claw -hoopskirt, crinoline -horizontal bar, high bar -horse cart, horse-cart -hourglass -iPod -iron, smoothing iron -jack-o'-lantern -jean, blue jean, denim -jeep, landrover -jersey, T-shirt, tee shirt -jigsaw puzzle -jinrikisha, ricksha, rickshaw -joystick -kimono -knee pad -knot -lab coat, laboratory coat -ladle -lampshade, lamp shade -laptop, laptop computer -lawn mower, mower -lens cap, lens cover -letter opener, paper knife, paperknife -library -lifeboat -lighter, light, igniter, ignitor -limousine, limo -liner, ocean liner -lipstick, lip rouge -Loafer -lotion -loudspeaker, speaker, speaker unit, loudspeaker system, speaker system -loupe, jeweler's loupe -lumbermill, sawmill -magnetic compass -mailbag, postbag -mailbox, letter box -maillot -maillot, tank suit -manhole cover -maraca -marimba, xylophone -mask -matchstick -maypole -maze, labyrinth -measuring cup -medicine chest, medicine cabinet -megalith, megalithic structure -microphone, mike -microwave, microwave oven -military uniform -milk can -minibus -miniskirt, mini -minivan -missile -mitten -mixing bowl -mobile home, manufactured home -Model T -modem -monastery -monitor -moped -mortar -mortarboard -mosque -mosquito net -motor scooter, scooter -mountain bike, all-terrain bike, off-roader -mountain tent -mouse, computer mouse -mousetrap -moving van -muzzle -nail -neck brace -necklace -nipple -notebook, notebook computer -obelisk -oboe, hautboy, hautbois -ocarina, sweet potato -odometer, hodometer, mileometer, milometer -oil filter -organ, pipe organ -oscilloscope, scope, cathode-ray oscilloscope, CRO -overskirt -oxcart -oxygen mask -packet -paddle, boat paddle -paddlewheel, paddle wheel -padlock -paintbrush -pajama, pyjama, pj's, jammies -palace -panpipe, pandean pipe, syrinx -paper towel -parachute, chute -parallel bars, bars -park bench -parking meter -passenger car, coach, carriage -patio, terrace -pay-phone, pay-station -pedestal, plinth, footstall -pencil box, pencil case -pencil sharpener -perfume, essence -Petri dish -photocopier -pick, plectrum, plectron -pickelhaube -picket fence, paling -pickup, pickup truck -pier -piggy bank, penny bank -pill bottle -pillow -ping-pong ball -pinwheel -pirate, pirate ship -pitcher, ewer -plane, carpenter's plane, woodworking plane 
-planetarium -plastic bag -plate rack -plow, plough -plunger, plumber's helper -Polaroid camera, Polaroid Land camera -pole -police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria -poncho -pool table, billiard table, snooker table -pop bottle, soda bottle -pot, flowerpot -potter's wheel -power drill -prayer rug, prayer mat -printer -prison, prison house -projectile, missile -projector -puck, hockey puck -punching bag, punch bag, punching ball, punchball -purse -quill, quill pen -quilt, comforter, comfort, puff -racer, race car, racing car -racket, racquet -radiator -radio, wireless -radio telescope, radio reflector -rain barrel -recreational vehicle, RV, R.V. -reel -reflex camera -refrigerator, icebox -remote control, remote -restaurant, eating house, eating place, eatery -revolver, six-gun, six-shooter -rifle -rocking chair, rocker -rotisserie -rubber eraser, rubber, pencil eraser -rugby ball -rule, ruler -running shoe -safe -safety pin -saltshaker, salt shaker -sandal -sarong -sax, saxophone -scabbard -scale, weighing machine -school bus -schooner -scoreboard -screen, CRT screen -screw -screwdriver -seat belt, seatbelt -sewing machine -shield, buckler -shoe shop, shoe-shop, shoe store -shoji -shopping basket -shopping cart -shovel -shower cap -shower curtain -ski -ski mask -sleeping bag -slide rule, slipstick -sliding door -slot, one-armed bandit -snorkel -snowmobile -snowplow, snowplough -soap dispenser -soccer ball -sock -solar dish, solar collector, solar furnace -sombrero -soup bowl -space bar -space heater -space shuttle -spatula -speedboat -spider web, spider's web -spindle -sports car, sport car -spotlight, spot -stage -steam locomotive -steel arch bridge -steel drum -stethoscope -stole -stone wall -stopwatch, stop watch -stove -strainer -streetcar, tram, tramcar, trolley, trolley car -stretcher -studio couch, day bed -stupa, tope -submarine, pigboat, sub, U-boat -suit, suit of clothes -sundial -sunglass -sunglasses, dark glasses, shades -sunscreen, sunblock, sun blocker -suspension bridge -swab, swob, mop -sweatshirt -swimming trunks, bathing trunks -swing -switch, electric switch, electrical switch -syringe -table lamp -tank, army tank, armored combat vehicle, armoured combat vehicle -tape player -teapot -teddy, teddy bear -television, television system -tennis ball -thatch, thatched roof -theater curtain, theatre curtain -thimble -thresher, thrasher, threshing machine -throne -tile roof -toaster -tobacco shop, tobacconist shop, tobacconist -toilet seat -torch -totem pole -tow truck, tow car, wrecker -toyshop -tractor -trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi -tray -trench coat -tricycle, trike, velocipede -trimaran -tripod -triumphal arch -trolleybus, trolley coach, trackless trolley -trombone -tub, vat -turnstile -typewriter keyboard -umbrella -unicycle, monocycle -upright, upright piano -vacuum, vacuum cleaner -vase -vault -velvet -vending machine -vestment -viaduct -violin, fiddle -volleyball -waffle iron -wall clock -wallet, billfold, notecase, pocketbook -wardrobe, closet, press -warplane, military plane -washbasin, handbasin, washbowl, lavabo, wash-hand basin -washer, automatic washer, washing machine -water bottle -water jug -water tower -whiskey jug -whistle -wig -window screen -window shade -Windsor tie -wine bottle -wing -wok -wooden spoon -wool, woolen, woollen -worm fence, snake fence, snake-rail fence, Virginia fence -wreck -yawl -yurt -web site, website, internet site, site -comic book -crossword puzzle, crossword 
-street sign -traffic light, traffic signal, stoplight -book jacket, dust cover, dust jacket, dust wrapper -menu -plate -guacamole -consomme -hot pot, hotpot -trifle -ice cream, icecream -ice lolly, lolly, lollipop, popsicle -French loaf -bagel, beigel -pretzel -cheeseburger -hotdog, hot dog, red hot -mashed potato -head cabbage -broccoli -cauliflower -zucchini, courgette -spaghetti squash -acorn squash -butternut squash -cucumber, cuke -artichoke, globe artichoke -bell pepper -cardoon -mushroom -Granny Smith -strawberry -orange -lemon -fig -pineapple, ananas -banana -jackfruit, jak, jack -custard apple -pomegranate -hay -carbonara -chocolate sauce, chocolate syrup -dough -meat loaf, meatloaf -pizza, pizza pie -potpie -burrito -red wine -espresso -cup -eggnog -alp -bubble -cliff, drop, drop-off -coral reef -geyser -lakeside, lakeshore -promontory, headland, head, foreland -sandbar, sand bar -seashore, coast, seacoast, sea-coast -valley, vale -volcano -ballplayer, baseball player -groom, bridegroom -scuba diver -rapeseed -daisy -yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum -corn -acorn -hip, rose hip, rosehip -buckeye, horse chestnut, conker -coral fungus -agaric -gyromitra -stinkhorn, carrion fungus -earthstar -hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa -bolete -ear, spike, capitulum -toilet tissue, toilet paper, bathroom tissue'''.split("\n") diff --git a/tensorlayer/models/mobilenetv1.py b/tensorlayer/models/mobilenetv1.py deleted file mode 100644 index fd169b025..000000000 --- a/tensorlayer/models/mobilenetv1.py +++ /dev/null @@ -1,118 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- -"""MobileNet for ImageNet.""" - -import os - -import tensorflow as tf - -from tensorlayer import logging -from tensorlayer.files import (assign_weights, load_npz, maybe_download_and_extract) -from tensorlayer.layers import (BatchNorm, Conv2d, DepthwiseConv2d, Flatten, GlobalMeanPool2d, Input, Reshape) -from tensorlayer.models import Model - -__all__ = [ - 'MobileNetV1', -] - -layer_names = [ - 'conv', 'depth1', 'depth2', 'depth3', 'depth4', 'depth5', 'depth6', 'depth7', 'depth8', 'depth9', 'depth10', - 'depth11', 'depth12', 'depth13', 'globalmeanpool', 'reshape', 'out' -] -n_filters = [32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024] - - -def conv_block(n, n_filter, filter_size=(3, 3), strides=(1, 1), name='conv_block'): - # ref: https://github.com/keras-team/keras/blob/master/keras/applications/mobilenet.py - n = Conv2d(n_filter, filter_size, strides, b_init=None, name=name + '.conv')(n) - n = BatchNorm(decay=0.99, act=tf.nn.relu6, name=name + '.batchnorm')(n) - return n - - -def depthwise_conv_block(n, n_filter, strides=(1, 1), name="depth_block"): - n = DepthwiseConv2d((3, 3), strides, b_init=None, name=name + '.depthwise')(n) - n = BatchNorm(decay=0.99, act=tf.nn.relu6, name=name + '.batchnorm1')(n) - n = Conv2d(n_filter, (1, 1), (1, 1), b_init=None, name=name + '.conv')(n) - n = BatchNorm(decay=0.99, act=tf.nn.relu6, name=name + '.batchnorm2')(n) - return n - - -def restore_params(network, path='models'): - logging.info("Restore pre-trained parameters") - maybe_download_and_extract( - 'mobilenet.npz', path, 'https://github.com/tensorlayer/pretrained-models/raw/master/models/', - expected_bytes=25600116 - ) # ls -al - params = load_npz(name=os.path.join(path, 'mobilenet.npz')) - # for idx, net_weight in enumerate(network.all_weights): - # if 'batchnorm' in net_weight.name: - # params[idx] = 
params[idx].reshape(1, 1, 1, -1) - assign_weights(params[:len(network.all_weights)], network) - del params - - -def MobileNetV1(pretrained=False, end_with='out', name=None): - """Pre-trained MobileNetV1 model (static mode). Input shape [?, 224, 224, 3], value range [0, 1]. - - Parameters - ---------- - pretrained : boolean - Whether to load pretrained weights. Default False. - end_with : str - The end point of the model [conv, depth1, depth2 ... depth13, globalmeanpool, out]. Default ``out`` i.e. the whole model. - name : None or str - Name for this model. - - Examples - --------- - Classify ImageNet classes, see `tutorial_models_mobilenetv1.py `__ - - >>> # get the whole model with pretrained weights - >>> mobilenetv1 = tl.models.MobileNetV1(pretrained=True) - >>> # use for inferencing - >>> output = mobilenetv1(img1, is_train=False) - >>> prob = tf.nn.softmax(output)[0].numpy() - - Extract features and Train a classifier with 100 classes - - >>> # get model without the last layer - >>> cnn = tl.models.MobileNetV1(pretrained=True, end_with='reshape').as_layer() - >>> # add one more layer and build new model - >>> ni = Input([None, 224, 224, 3], name="inputs") - >>> nn = cnn(ni) - >>> nn = Conv2d(100, (1, 1), (1, 1), name='out')(nn) - >>> nn = Flatten(name='flatten')(nn) - >>> model = tl.models.Model(inputs=ni, outputs=nn) - >>> # train your own classifier (only update the last layer) - >>> train_params = model.get_layer('out').trainable_weights - - Returns - ------- - static MobileNetV1. - """ - ni = Input([None, 224, 224, 3], name="input") - - for i in range(len(layer_names)): - if i == 0: - n = conv_block(ni, n_filters[i], strides=(2, 2), name=layer_names[i]) - elif layer_names[i] in ['depth2', 'depth4', 'depth6', 'depth12']: - n = depthwise_conv_block(n, n_filters[i], strides=(2, 2), name=layer_names[i]) - elif layer_names[i] == 'globalmeanpool': - n = GlobalMeanPool2d(name='globalmeanpool')(n) - elif layer_names[i] == 'reshape': - n = Reshape([-1, 1, 1, 1024], name='reshape')(n) - elif layer_names[i] == 'out': - n = Conv2d(1000, (1, 1), (1, 1), name='out')(n) - n = Flatten(name='flatten')(n) - else: - n = depthwise_conv_block(n, n_filters[i], name=layer_names[i]) - - if layer_names[i] == end_with: - break - - network = Model(inputs=ni, outputs=n, name=name) - - if pretrained: - restore_params(network) - - return network diff --git a/tensorlayer/models/models/resnet50_weights_tf_dim_ordering_tf_kernels.h5 b/tensorlayer/models/models/resnet50_weights_tf_dim_ordering_tf_kernels.h5 new file mode 100644 index 0000000000000000000000000000000000000000..904349fea3fbc3f8267a7cc70c5b7d7f464781e5 GIT binary patch literal 24576 zcmeFXd0bD=|2CfXy`+fJqD4Zx)a%TgRwN{mH5C=Il~5wIX_Y8Nq)3ZaQd(Z~oRh5( zvdg|^sif@L?^L|={@maD_s8#kJbuglJ%5}z&-1)y=9-ynX3or+SO@#@GLl0i1%6%P z;sU|~^4*WWZ$EplDql^}U+;d~zx|%Q5B++>nBMS^V9!Ipt`vXylzZRH_olBQ)brwU zPtF1}T*i$P=qL0i{eS19vHo~>itBpQakkGE`So~Qxc9}n9{=C$dwu^M{C~a%9LCu= zcNapcrzlmuy3w0i)&1hX(%E+H9%Y91MC;Px|6GrUhWFrh_u_sB{d@KLZ(U0Z?3Vl` zy?z4H0^(ijUcMmMC%|Lnf)zfiyQ2>1N>1*Vzh6(frM(IM4t-bpuU+F|-1}Bwy-$Fj z{{nAs-<69^ygiJ){8k43@d*elTu7AzJUwY`1-BvPGH4?b^d;S%X^~=1o*5r zS>UlSV8KeSB_<2H;`X%!`ouNq%KR_z!oR=^1^R+b{|)x~D{dFrz8QJ_W&SG33ZH<* zKL3=r4-K>4lo$TRxsTPC#+ZMDyE*>>|IOL_-(as_B>#ZBNcOP$RT14+A68?#Ui_*Y zufJIJvHG&I_&2zl)gSQRtStWx?q>A|{D;-A8t=aPurfC4W@Y+U`}Vc^vNG}&l+qbr5K zz}=kxME$!ajl0zF7x-6A{*&^bn*1f2|6tW6j$d*A)O$CpKGt8Xy43I&xSQ1<@V{7f zso^hhH>*G3KdgSK^FLU1iQ`vX@4l+*YxQMi(kq5P;BHoZtiQm$V)*0kX4S{)?F+wj z|8G_%J=!q)OLcv%zcT7+?tk3BSoO930{1lcKkjbT{V}^$_s8l{-5>KG)&1&z|G~LO 
diff --git a/tensorlayer/models/resnet.py b/tensorlayer/models/resnet.py
deleted file mode 100644
index 458f25912..000000000
--- a/tensorlayer/models/resnet.py
+++ /dev/null
@@ -1,203 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-"""ResNet for ImageNet.
-
-# Reference:
-- [Deep Residual Learning for Image Recognition](
-    https://arxiv.org/abs/1512.03385) (CVPR 2016 Best Paper Award)
-
-"""
-
-import os
-
-import tensorflow as tf
-
-from tensorlayer import logging
-from tensorlayer.files import (assign_weights, load_npz, maybe_download_and_extract)
-from tensorlayer.layers import (BatchNorm, Conv2d, Dense, Elementwise, GlobalMeanPool2d, Input, MaxPool2d)
-from tensorlayer.models import Model
-
-__all__ = [
-    'ResNet50',
-]
-
-
-def identity_block(input, kernel_size, n_filters, stage, block):
-    """The identity block, where there is no conv layer on the shortcut.
-
-    Parameters
-    ----------
-    input : tf tensor
-        Input tensor from the layer above.
-    kernel_size : int
-        The kernel size of the middle conv layer on the main path.
-    n_filters : list of integers
-        The numbers of filters for the 3 conv layers on the main path.
-    stage : int
-        Current stage label.
-    block : str
-        Current block label.
-
-    Returns
-    -------
-        Output tensor of this block.
-
-    """
-    filters1, filters2, filters3 = n_filters
-    conv_name_base = 'res' + str(stage) + block + '_branch'
-    bn_name_base = 'bn' + str(stage) + block + '_branch'
-
-    x = Conv2d(filters1, (1, 1), W_init=tf.initializers.he_normal(), name=conv_name_base + '2a')(input)
-    x = BatchNorm(name=bn_name_base + '2a', act='relu')(x)
-
-    ks = (kernel_size, kernel_size)
-    x = Conv2d(filters2, ks, padding='SAME', W_init=tf.initializers.he_normal(), name=conv_name_base + '2b')(x)
-    x = BatchNorm(name=bn_name_base + '2b', act='relu')(x)
-
-    x = Conv2d(filters3, (1, 1), W_init=tf.initializers.he_normal(), name=conv_name_base + '2c')(x)
-    x = BatchNorm(name=bn_name_base + '2c')(x)
-
-    x = Elementwise(tf.add, act='relu')([x, input])
-    return x
-def conv_block(input, kernel_size, n_filters, stage, block, strides=(2, 2)):
-    """The conv block, where there is a conv layer on the shortcut.
-
-    Parameters
-    ----------
-    input : tf tensor
-        Input tensor from the layer above.
-    kernel_size : int
-        The kernel size of the middle conv layer on the main path.
-    n_filters : list of integers
-        The numbers of filters for the 3 conv layers on the main path.
-    stage : int
-        Current stage label.
-    block : str
-        Current block label.
-    strides : tuple
-        Strides for the first conv layer in the block.
-
-    Returns
-    -------
-        Output tensor of this block.
-
-    """
-    filters1, filters2, filters3 = n_filters
-    conv_name_base = 'res' + str(stage) + block + '_branch'
-    bn_name_base = 'bn' + str(stage) + block + '_branch'
-
-    x = Conv2d(filters1, (1, 1), strides=strides, W_init=tf.initializers.he_normal(), name=conv_name_base + '2a')(input)
-    x = BatchNorm(name=bn_name_base + '2a', act='relu')(x)
-
-    ks = (kernel_size, kernel_size)
-    x = Conv2d(filters2, ks, padding='SAME', W_init=tf.initializers.he_normal(), name=conv_name_base + '2b')(x)
-    x = BatchNorm(name=bn_name_base + '2b', act='relu')(x)
-
-    x = Conv2d(filters3, (1, 1), W_init=tf.initializers.he_normal(), name=conv_name_base + '2c')(x)
-    x = BatchNorm(name=bn_name_base + '2c')(x)
-
-    shortcut = Conv2d(filters3, (1, 1), strides=strides, W_init=tf.initializers.he_normal(),
-                      name=conv_name_base + '1')(input)
-    shortcut = BatchNorm(name=bn_name_base + '1')(shortcut)
-
-    x = Elementwise(tf.add, act='relu')([x, shortcut])
-    return x
-
-
-block_names = ['2a', '2b', '2c', '3a', '3b', '3c', '3d', '4a', '4b', '4c', '4d', '4e', '4f', '5a', '5b', '5c'
-              ] + ['avg_pool', 'fc1000']
-block_filters = [[64, 64, 256], [128, 128, 512], [256, 256, 1024], [512, 512, 2048]]
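ResNet50 below decodes each two-character entry of block_names into a stage digit and a block letter, sending 'a' blocks to conv_block (projection shortcut) and the rest to identity_block. The dispatch can be previewed in isolation (plain-Python mirror of the loop below; illustrative only):

    block_names = ['2a', '2b', '2c', '3a', '3b', '3c', '3d', '4a', '4b', '4c',
                   '4d', '4e', '4f', '5a', '5b', '5c']
    block_filters = [[64, 64, 256], [128, 128, 512], [256, 256, 1024], [512, 512, 2048]]

    for name in block_names:
        stage, block = int(name[0]), name[1]
        kind = 'conv_block' if block == 'a' else 'identity_block'
        print(name, kind, block_filters[stage - 2])  # e.g. "2a conv_block [64, 64, 256]"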
-
-
-def ResNet50(pretrained=False, end_with='fc1000', n_classes=1000, name=None):
-    """Pre-trained ResNet50 model (static mode). Input shape [?, 224, 224, 3].
-    To use the pretrained model, the input should be in BGR format with the ImageNet mean [103.939, 116.779, 123.68] subtracted.
-
-    Parameters
-    ----------
-    pretrained : boolean
-        Whether to load pretrained weights. Default False.
-    end_with : str
-        The end point of the model [conv1, 2a, 2b, 2c ... 5c, avg_pool, fc1000].
-        Default ``fc1000`` i.e. the whole model.
-    n_classes : int
-        Number of classes in final prediction.
-    name : None or str
-        Name for this model.
-
-    Examples
-    ---------
-    Classify ImageNet classes, see `tutorial_models_resnet50.py`
-
-    >>> # get the whole model with pretrained weights
-    >>> resnet = tl.models.ResNet50(pretrained=True)
-    >>> # use for inference
-    >>> output = resnet(img1, is_train=False)
-    >>> prob = tf.nn.softmax(output)[0].numpy()
-
-    Extract the features before the fc layer
-
-    >>> resnet = tl.models.ResNet50(pretrained=True, end_with='5c')
-    >>> output = resnet(img1, is_train=False)
-
-    Returns
-    -------
-    ResNet50 model.
-
-    """
-    ni = Input([None, 224, 224, 3], name="input")
-    n = Conv2d(64, (7, 7), strides=(2, 2), padding='SAME', W_init=tf.initializers.he_normal(), name='conv1')(ni)
-    n = BatchNorm(name='bn_conv1', act='relu')(n)
-    n = MaxPool2d((3, 3), strides=(2, 2), name='max_pool1')(n)
-
-    for i, block_name in enumerate(block_names):
-        if len(block_name) == 2:
-            stage = int(block_name[0])
-            block = block_name[1]
-            if block == 'a':
-                strides = (1, 1) if stage == 2 else (2, 2)
-                n = conv_block(n, 3, block_filters[stage - 2], stage=stage, block=block, strides=strides)
-            else:
-                n = identity_block(n, 3, block_filters[stage - 2], stage=stage, block=block)
-        elif block_name == 'avg_pool':
-            n = GlobalMeanPool2d(name='avg_pool')(n)
-        elif block_name == 'fc1000':
-            n = Dense(n_classes, name='fc1000')(n)
-
-        if block_name == end_with:
-            break
-
-    network = Model(inputs=ni, outputs=n, name=name)
-
-    if pretrained:
-        restore_params(network)
-
-    return network
-
-
-def restore_params(network, path='models'):
-    logging.info("Restore pre-trained parameters")
-    maybe_download_and_extract(
-        'resnet50_weights_tf_dim_ordering_tf_kernels.h5',
-        path,
-        'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/',
-    )  # ls -al
-    try:
-        import h5py
-    except Exception:
-        raise ImportError('h5py is required to load the pretrained ResNet50 weights')
-
-    f = h5py.File(os.path.join(path, 'resnet50_weights_tf_dim_ordering_tf_kernels.h5'), 'r')
-
-    for layer in network.all_layers:
-        if len(layer.all_weights) == 0:
-            continue
-        w_names = list(f[layer.name])
-        params = [f[layer.name][n][:] for n in w_names]
-        # if 'bn' in layer.name:
-        #     params = [x.reshape(1, 1, 1, -1) for x in params]
-        assign_weights(params, layer)
-        del params
-
-    f.close()
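restore_params above walks the network's layers and indexes the Keras-format h5 file by layer name, expecting one group per layer whose member datasets are that layer's weights. A minimal inspection sketch under that same assumption (the file must already have been downloaded to models/, as maybe_download_and_extract does above):

    import os
    import h5py  # restore_params deliberately fails with ImportError without it

    path = os.path.join('models', 'resnet50_weights_tf_dim_ordering_tf_kernels.h5')
    with h5py.File(path, 'r') as f:
        for layer_name in list(f)[:3]:       # first few layer groups
            for w_name in f[layer_name]:     # datasets inside each group
                print(layer_name, w_name, f[layer_name][w_name].shape)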
diff --git a/tensorlayer/models/seq2seq.py b/tensorlayer/models/seq2seq.py
deleted file mode 100644
index 0473eeffc..000000000
--- a/tensorlayer/models/seq2seq.py
+++ /dev/null
@@ -1,163 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.layers import Dense, Dropout, Input
-from tensorlayer.layers.core import Layer
-from tensorlayer.models import Model
-
-__all__ = ['Seq2seq']
-
-
-class Seq2seq(Model):
-    """Vanilla stacked-layer Seq2Seq model.
-
-    Parameters
-    ----------
-    decoder_seq_length : int
-        The length of your target sequence
-    cell_enc : TensorFlow cell function
-        The RNN function cell for your encoder stack, e.g. tf.keras.layers.GRUCell
-    cell_dec : TensorFlow cell function
-        The RNN function cell for your decoder stack, e.g. tf.keras.layers.GRUCell
-    n_layer : int
-        The number of your RNN layers for both encoder and decoder block
-    embedding_layer : tl.Layer
-        An embedding layer, e.g. tl.layers.Embedding(vocabulary_size=voc_size, embedding_size=emb_dim)
-    name : str
-        The model name
-
-    Examples
-    ---------
-    For a working stacked-layer Seq2Seq application, see `chatbot `__
-
-    Returns
-    -------
-    static stacked-layer Seq2Seq model.
-    """
-
-    def __init__(self, decoder_seq_length, cell_enc, cell_dec, n_units=256, n_layer=3, embedding_layer=None, name=None):
-        super(Seq2seq, self).__init__(name=name)
-        self.embedding_layer = embedding_layer
-        self.vocabulary_size = embedding_layer.vocabulary_size
-        self.embedding_size = embedding_layer.embedding_size
-        self.n_layer = n_layer
-        self.enc_layers = []
-        self.dec_layers = []
-        for i in range(n_layer):
-            if (i == 0):
-                self.enc_layers.append(
-                    tl.layers.RNN(
-                        cell=cell_enc(units=n_units), in_channels=self.embedding_size, return_last_state=True
-                    )
-                )
-            else:
-                self.enc_layers.append(
-                    tl.layers.RNN(cell=cell_enc(units=n_units), in_channels=n_units, return_last_state=True)
-                )
-
-        for i in range(n_layer):
-            if (i == 0):
-                self.dec_layers.append(
-                    tl.layers.RNN(
-                        cell=cell_dec(units=n_units), in_channels=self.embedding_size, return_last_state=True
-                    )
-                )
-            else:
-                self.dec_layers.append(
-                    tl.layers.RNN(cell=cell_dec(units=n_units), in_channels=n_units, return_last_state=True)
-                )
-
-        self.reshape_layer = tl.layers.Reshape([-1, n_units])
-        self.dense_layer = tl.layers.Dense(n_units=self.vocabulary_size, in_channels=n_units)
-        self.reshape_layer_after = tl.layers.Reshape([-1, decoder_seq_length, self.vocabulary_size])
-        self.reshape_layer_individual_sequence = tl.layers.Reshape([-1, 1, self.vocabulary_size])
-
-    def inference(self, encoding, seq_length, start_token, top_n):
-        """Inference mode.
-
-        Parameters
-        ----------
-        encoding : input tensor
-            The source sequences
-        seq_length : int
-            The expected length of your predicted sequence.
-        start_token : int
-            The token marking the start of a sequence.
-        top_n : int
-            Sample the next token from the top_n candidates, weighted by their probabilities.
-        """
-        feed_output = self.embedding_layer(encoding[0])
-        state = [None for i in range(self.n_layer)]
-
-        for i in range(self.n_layer):
-            feed_output, state[i] = self.enc_layers[i](feed_output, return_state=True)
-
-        batch_size = len(encoding[0].numpy())
-        decoding = [[start_token] for i in range(batch_size)]
-        feed_output = self.embedding_layer(decoding)
-
-        for i in range(self.n_layer):
-            feed_output, state[i] = self.dec_layers[i](feed_output, initial_state=state[i], return_state=True)
-
-        feed_output = self.reshape_layer(feed_output)
-        feed_output = self.dense_layer(feed_output)
-        feed_output = self.reshape_layer_individual_sequence(feed_output)
-        feed_output = tf.argmax(feed_output, -1)
-        # [B, 1]
-        final_output = feed_output
-
-        for i in range(seq_length - 1):
-            feed_output = self.embedding_layer(feed_output)
-            for i in range(self.n_layer):
-                feed_output, state[i] = self.dec_layers[i](feed_output, initial_state=state[i], return_state=True)
-            feed_output = self.reshape_layer(feed_output)
-            feed_output = self.dense_layer(feed_output)
-            feed_output = self.reshape_layer_individual_sequence(feed_output)
-            ori_feed_output = feed_output
-            if (top_n is not None):
-                for k in range(batch_size):
-                    idx = np.argpartition(ori_feed_output[k][0], -top_n)[-top_n:]
-                    probs = [ori_feed_output[k][0][i] for i in idx]
-                    probs = probs / np.sum(probs)
-                    feed_output = np.random.choice(idx, p=probs)
-                    feed_output = tf.convert_to_tensor([[feed_output]], dtype=tf.int64)
-                    if (k == 0):
-                        final_output_temp = feed_output
-                    else:
-                        final_output_temp = tf.concat([final_output_temp, feed_output], 0)
-                feed_output = final_output_temp
-            else:
-                feed_output = tf.argmax(feed_output, -1)
-            final_output = tf.concat([final_output, feed_output], 1)
-
-        return final_output, state
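The top_n branch of inference implements truncated sampling: keep the top_n scores, renormalise them, and draw one index. The same idea in isolation (NumPy; non-negative scores are assumed here, since the renormalisation in the loop above divides by their plain sum):

    import numpy as np

    def sample_top_n(scores, top_n, rng=np.random.default_rng(0)):
        idx = np.argpartition(scores, -top_n)[-top_n:]   # ids of the top_n scores
        probs = scores[idx] / scores[idx].sum()          # renormalise their mass
        return rng.choice(idx, p=probs)

    scores = np.array([0.05, 0.40, 0.25, 0.20, 0.10])
    print(sample_top_n(scores, top_n=3))  # draws only from indices 1, 2 and 3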
-    def forward(self, inputs, seq_length=20, start_token=None, return_state=False, top_n=None):
-
-        state = [None for i in range(self.n_layer)]
-        if (self.is_train):
-            encoding = inputs[0]
-            enc_output = self.embedding_layer(encoding)
-
-            for i in range(self.n_layer):
-                enc_output, state[i] = self.enc_layers[i](enc_output, return_state=True)
-
-            decoding = inputs[1]
-            dec_output = self.embedding_layer(decoding)
-
-            for i in range(self.n_layer):
-                dec_output, state[i] = self.dec_layers[i](dec_output, initial_state=state[i], return_state=True)
-
-            dec_output = self.reshape_layer(dec_output)
-            denser_output = self.dense_layer(dec_output)
-            output = self.reshape_layer_after(denser_output)
-        else:
-            encoding = inputs
-            output, state = self.inference(encoding, seq_length, start_token, top_n)
-
-        if (return_state):
-            return output, state
-        else:
-            return output
diff --git a/tensorlayer/models/seq2seq_with_attention.py b/tensorlayer/models/seq2seq_with_attention.py
deleted file mode 100644
index 800bbaa61..000000000
--- a/tensorlayer/models/seq2seq_with_attention.py
+++ /dev/null
@@ -1,210 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-
-import numpy as np
-import tensorflow as tf
-
-import tensorlayer as tl
-from tensorlayer.layers import Dense, Dropout, Input
-from tensorlayer.layers.core import Layer
-from tensorlayer.models import Model
-
-__all__ = ['Seq2seqLuongAttention']
-
-
-class Encoder(Layer):
-
-    def __init__(self, hidden_size, cell, embedding_layer, name=None):
-        super(Encoder, self).__init__(name)
-        self.cell = cell(hidden_size)
-        self.hidden_size = hidden_size
-        self.embedding_layer = embedding_layer
-        self.build((None, None, self.embedding_layer.embedding_size))
-        self._built = True
-
-    def build(self, inputs_shape):
-        self.cell.build(input_shape=tuple(inputs_shape))
-        self._built = True
-        if self._trainable_weights is None:
-            self._trainable_weights = list()
-
-        for var in self.cell.trainable_variables:
-            self._trainable_weights.append(var)
-
-    def forward(self, src_seq, initial_state=None):
-
-        states = initial_state if initial_state is not None else self.cell.get_initial_state(src_seq)
-        encoding_hidden_states = list()
-        total_steps = src_seq.get_shape().as_list()[1]
-        for time_step in range(total_steps):
-            if not isinstance(states, list):
-                states = [states]
-            output, states = self.cell.call(src_seq[:, time_step, :], states, training=self.is_train)
-            encoding_hidden_states.append(states[0])
-        return output, encoding_hidden_states, states[0]
-
-
-class Decoder_Attention(Layer):
-
-    def __init__(self, hidden_size, cell, embedding_layer, method, name=None):
-        super(Decoder_Attention, self).__init__(name)
-        self.cell = cell(hidden_size)
-        self.hidden_size = hidden_size
-        self.embedding_layer = embedding_layer
-        self.method = method
-        self.build((None, hidden_size + self.embedding_layer.embedding_size))
-        self._built = True
-
-    def build(self, inputs_shape):
-        self.cell.build(input_shape=tuple(inputs_shape))
-        self._built = True
-        # string equality must use ==, not identity checks with `is`
-        if self.method == "concat":
-            self.W = self._get_weights("W", shape=(2 * self.hidden_size, self.hidden_size))
-            self.V = self._get_weights("V", shape=(self.hidden_size, 1))
-        elif self.method == "general":
-            self.W = self._get_weights("W", shape=(self.hidden_size, self.hidden_size))
-        if self._trainable_weights is None:
-            self._trainable_weights = list()
-
-        for var in self.cell.trainable_variables:
-            self._trainable_weights.append(var)
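build above registers extra weights only for the "concat" and "general" scores; "dot" needs none. For reference, the three scoring rules from Luong et al. reduce to the following (single decoder step, random weights, NumPy sketch, illustrative only):

    import numpy as np

    rng = np.random.default_rng(0)
    H, T = 4, 5
    enc = rng.standard_normal((T, H))   # encoder hidden states, one example
    h = rng.standard_normal(H)          # current decoder hidden state

    dot = enc @ h                       # "dot":     score_t = enc_t . h
    W = rng.standard_normal((H, H))
    general = enc @ (W @ h)             # "general": score_t = enc_t . (W h)
    Wc = rng.standard_normal((2 * H, H))
    v = rng.standard_normal(H)
    concat = np.tanh(np.concatenate([np.tile(h, (T, 1)), enc], axis=1) @ Wc) @ v

    for s in (dot, general, concat):
        print(np.round(np.exp(s) / np.exp(s).sum(), 3))  # softmax over the T positions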
-    def score(self, encoding_hidden, hidden, method):
-        # encoding = [B, T, H]
-        # hidden = [B, H]
-        # combined = [B, T, 2H]
-        if method == "concat":
-            # hidden = [B,H] -> [B,1,H] -> [B,T,H]
-            hidden = tf.expand_dims(hidden, 1)
-            hidden = tf.tile(hidden, [1, encoding_hidden.shape[1], 1])
-            # combined = [B,T,2H]
-            combined = tf.concat([hidden, encoding_hidden], 2)
-            combined = tf.cast(combined, tf.float32)
-            score = tf.tensordot(combined, self.W, axes=[[2], [0]])  # score = [B,T,H]
-            score = tf.nn.tanh(score)  # score = [B,T,H]
-            score = tf.tensordot(self.V, score, axes=[[0], [2]])  # score = [1,B,T]
-            score = tf.squeeze(score, axis=0)  # score = [B,T]
-        elif method == "dot":
-            # hidden = [B,H] -> [B,H,1]
-            hidden = tf.expand_dims(hidden, 2)
-            score = tf.matmul(encoding_hidden, hidden)
-            score = tf.squeeze(score, axis=2)
-        elif method == "general":
-            # hidden = [B,H] -> [B,H,1]
-            score = tf.matmul(hidden, self.W)
-            score = tf.expand_dims(score, 2)
-            score = tf.matmul(encoding_hidden, score)
-            score = tf.squeeze(score, axis=2)
-
-        score = tf.nn.softmax(score, axis=-1)  # score = [B,T]
-        return score
-
-    def forward(self, dec_seq, enc_hiddens, last_hidden, method, return_last_state=False):
-        # dec_seq = [B, T_, V], enc_hiddens = [B, T, H], last_hidden = [B, H]
-        total_steps = dec_seq.get_shape().as_list()[1]
-        states = last_hidden
-        cell_outputs = list()
-        for time_step in range(total_steps):
-            attention_weights = self.score(enc_hiddens, last_hidden, method)
-            attention_weights = tf.expand_dims(attention_weights, 1)  # [B, 1, T]
-            context = tf.matmul(attention_weights, enc_hiddens)  # [B, 1, H]
-            context = tf.squeeze(context, 1)  # [B, H]
-            inputs = tf.concat([dec_seq[:, time_step, :], context], 1)
-            if not isinstance(states, list):
-                states = [states]
-            cell_output, states = self.cell.call(inputs, states, training=self.is_train)
-            cell_outputs.append(cell_output)
-            last_hidden = states[0]
-
-        cell_outputs = tf.convert_to_tensor(cell_outputs)
-        cell_outputs = tf.transpose(cell_outputs, perm=[1, 0, 2])
-        if (return_last_state):
-            return cell_outputs, last_hidden
-        return cell_outputs
-
-
-class Seq2seqLuongAttention(Model):
-    """Luong attention-based Seq2Seq model. Implementation based on https://arxiv.org/pdf/1508.04025.pdf.
-
-    Parameters
-    ----------
-    hidden_size : int
-        The hidden size of both encoder and decoder RNN cells
-    cell : TensorFlow cell function
-        The RNN function cell for your encoder and decoder stack, e.g. tf.keras.layers.GRUCell
-    embedding_layer : tl.Layer
-        An embedding layer, e.g. tl.layers.Embedding(vocabulary_size=voc_size, embedding_size=emb_dim)
-    method : str
-        The method used to compute the attention scores: "dot", "general" or "concat"
-    name : str
-        The model name
-
-
-    Returns
-    -------
-    static single-layer attention-based Seq2Seq model.
-    """
-
-    def __init__(self, hidden_size, embedding_layer, cell, method, name=None):
-        super(Seq2seqLuongAttention, self).__init__(name)
-        self.enc_layer = Encoder(hidden_size, cell, embedding_layer)
-        self.dec_layer = Decoder_Attention(hidden_size, cell, embedding_layer, method=method)
-        self.embedding_layer = embedding_layer
-        self.dense_layer = tl.layers.Dense(n_units=self.embedding_layer.vocabulary_size, in_channels=hidden_size)
-        self.method = method
-
-    def inference(self, src_seq, encoding_hidden_states, last_hidden_states, seq_length, sos):
-        """Inference mode.
-
-        Parameters
-        ----------
-        src_seq : input tensor
-            The source sequences
-        encoding_hidden_states : a list of tensor
-            The list of encoder's hidden states at each time step
-        last_hidden_states : tensor
-            The last hidden state from the encoder
-        seq_length : int
-            The expected length of your predicted sequence.
- sos : int - : The token of "start of sequence" - """ - - batch_size = src_seq.shape[0] - decoding = [[sos] for i in range(batch_size)] - dec_output = self.embedding_layer(decoding) - outputs = [[0] for i in range(batch_size)] - for step in range(seq_length): - dec_output, last_hidden_states = self.dec_layer( - dec_output, encoding_hidden_states, last_hidden_states, method=self.method, return_last_state=True - ) - dec_output = tf.reshape(dec_output, [-1, dec_output.shape[-1]]) - dec_output = self.dense_layer(dec_output) - dec_output = tf.reshape(dec_output, [batch_size, -1, dec_output.shape[-1]]) - dec_output = tf.argmax(dec_output, -1) - outputs = tf.concat([outputs, dec_output], 1) - dec_output = self.embedding_layer(dec_output) - - return outputs[:, 1:] - - def forward(self, inputs, seq_length=20, sos=None): - src_seq = inputs[0] - src_seq = self.embedding_layer(src_seq) - enc_output, encoding_hidden_states, last_hidden_states = self.enc_layer(src_seq) - encoding_hidden_states = tf.convert_to_tensor(encoding_hidden_states) - encoding_hidden_states = tf.transpose(encoding_hidden_states, perm=[1, 0, 2]) - last_hidden_states = tf.convert_to_tensor(last_hidden_states) - - if (self.is_train): - dec_seq = inputs[1] - dec_seq = self.embedding_layer(dec_seq) - dec_output = self.dec_layer(dec_seq, encoding_hidden_states, last_hidden_states, method=self.method) - batch_size = dec_output.shape[0] - dec_output = tf.reshape(dec_output, [-1, dec_output.shape[-1]]) - dec_output = self.dense_layer(dec_output) - dec_output = tf.reshape(dec_output, [batch_size, -1, dec_output.shape[-1]]) - else: - dec_output = self.inference(src_seq, encoding_hidden_states, last_hidden_states, seq_length, sos) - - return dec_output diff --git a/tensorlayer/models/squeezenetv1.py b/tensorlayer/models/squeezenetv1.py deleted file mode 100644 index b38d42dc8..000000000 --- a/tensorlayer/models/squeezenetv1.py +++ /dev/null @@ -1,111 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- -"""SqueezeNet for ImageNet.""" - -import os - -import tensorflow as tf - -from tensorlayer import logging -from tensorlayer.files import (assign_weights, load_npz, maybe_download_and_extract) -from tensorlayer.layers import (Concat, Conv2d, Dropout, GlobalMeanPool2d, Input, Lambda, MaxPool2d) -from tensorlayer.models import Model - -__all__ = [ - 'SqueezeNetV1', -] - -layer_names = [ - 'conv1', 'maxpool1', 'fire2', 'fire3', 'fire4', 'fire5', 'fire6', 'fire7', 'fire8', 'fire9', 'drop1', 'out' -] -n_filters = [16, 16, 32, 32, 48, 48, 64, 64] - - -def fire_block(n, n_filter, max_pool=False, name='fire_block'): - n = Conv2d(n_filter, (1, 1), (1, 1), tf.nn.relu, 'SAME', name=name + '.squeeze1x1')(n) - n1 = Conv2d(n_filter * 4, (1, 1), (1, 1), tf.nn.relu, 'SAME', name=name + '.expand1x1')(n) - n2 = Conv2d(n_filter * 4, (3, 3), (1, 1), tf.nn.relu, 'SAME', name=name + '.expand3x3')(n) - n = Concat(-1, name=name + '.concat')([n1, n2]) - if max_pool: - n = MaxPool2d((3, 3), (2, 2), 'VALID', name=name + '.max')(n) - return n - - -def restore_params(network, path='models'): - logging.info("Restore pre-trained parameters") - maybe_download_and_extract( - 'squeezenet.npz', path, 'https://github.com/tensorlayer/pretrained-models/raw/master/models/', - expected_bytes=7405613 - ) # ls -al - params = load_npz(name=os.path.join(path, 'squeezenet.npz')) - assign_weights(params[:len(network.all_weights)], network) - del params - - -def SqueezeNetV1(pretrained=False, end_with='out', name=None): - """Pre-trained SqueezeNetV1 model (static mode). 
Input shape [?, 224, 224, 3], value range [0, 1]. - - Parameters - ------------ - pretrained : boolean - Whether to load pretrained weights. Default False. - end_with : str - The end point of the model [conv1, maxpool1, fire2, fire3, fire4, ..., out]. Default ``out`` i.e. the whole model. - name : None or str - Name for this model. - - Examples - --------- - Classify ImageNet classes, see `tutorial_models_squeezenetv1.py `__ - - >>> # get the whole model - >>> squeezenet = tl.models.SqueezeNetV1(pretrained=True) - >>> # use for inferencing - >>> output = squeezenet(img1, is_train=False) - >>> prob = tf.nn.softmax(output)[0].numpy() - - Extract features and Train a classifier with 100 classes - - >>> # get model without the last layer - >>> cnn = tl.models.SqueezeNetV1(pretrained=True, end_with='drop1').as_layer() - >>> # add one more layer and build new model - >>> ni = Input([None, 224, 224, 3], name="inputs") - >>> nn = cnn(ni) - >>> nn = Conv2d(100, (1, 1), (1, 1), padding='VALID', name='conv10')(nn) - >>> nn = GlobalMeanPool2d(name='globalmeanpool')(nn) - >>> model = tl.models.Model(inputs=ni, outputs=nn) - >>> # train your own classifier (only update the last layer) - >>> train_params = model.get_layer('conv10').trainable_weights - - Returns - ------- - static SqueezeNetV1. - - """ - ni = Input([None, 224, 224, 3], name="input") - n = Lambda(lambda x: x * 255, name='scale')(ni) - - for i in range(len(layer_names)): - if layer_names[i] == 'conv1': - n = Conv2d(64, (3, 3), (2, 2), tf.nn.relu, 'SAME', name='conv1')(n) - elif layer_names[i] == 'maxpool1': - n = MaxPool2d((3, 3), (2, 2), 'VALID', name='maxpool1')(n) - elif layer_names[i] == 'drop1': - n = Dropout(keep=0.5, name='drop1')(n) - elif layer_names[i] == 'out': - n = Conv2d(1000, (1, 1), (1, 1), padding='VALID', name='conv10')(n) # 13, 13, 1000 - n = GlobalMeanPool2d(name='globalmeanpool')(n) - elif layer_names[i] in ['fire3', 'fire5']: - n = fire_block(n, n_filters[i - 2], max_pool=True, name=layer_names[i]) - else: - n = fire_block(n, n_filters[i - 2], max_pool=False, name=layer_names[i]) - - if layer_names[i] == end_with: - break - - network = Model(inputs=ni, outputs=n, name=name) - - if pretrained: - restore_params(network) - - return network diff --git a/tensorlayer/models/vgg.py b/tensorlayer/models/vgg.py deleted file mode 100644 index c57572e24..000000000 --- a/tensorlayer/models/vgg.py +++ /dev/null @@ -1,366 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- -""" -VGG for ImageNet. - -Introduction ----------------- -VGG is a convolutional neural network model proposed by K. Simonyan and A. Zisserman -from the University of Oxford in the paper "Very Deep Convolutional Networks for -Large-Scale Image Recognition" . The model achieves 92.7% top-5 test accuracy in ImageNet, -which is a dataset of over 14 million images belonging to 1000 classes. - -Download Pre-trained Model ----------------------------- -- Model weights in this example - vgg16_weights.npz : http://www.cs.toronto.edu/~frossard/post/vgg16/ -- Model weights in this example - vgg19.npy : https://media.githubusercontent.com/media/tensorlayer/pretrained-models/master/models/ -- Caffe VGG 16 model : https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md -- Tool to convert the Caffe models to TensorFlow's : https://github.com/ethereon/caffe-tensorflow - -Note ------- -- For simplified CNN layer see "Convolutional layer (Simplified)" -in read the docs website. 
-- When feeding other images to the model be sure to properly resize or crop them -beforehand. Distorted images might end up being misclassified. One way of safely -feeding images of multiple sizes is by doing center cropping. - -""" - -import os - -import numpy as np -import tensorflow as tf - -import tensorlayer as tl -from tensorlayer import logging -from tensorlayer.files import assign_weights, maybe_download_and_extract -from tensorlayer.layers import (BatchNorm, Conv2d, Dense, Flatten, Input, Lambda, LayerList, MaxPool2d) -from tensorlayer.models import Model - -__all__ = [ - 'VGG', - 'vgg16', - 'vgg19', - 'VGG16', - 'VGG19', - # 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', - # 'vgg19_bn', 'vgg19', -] - -layer_names = [ - ['conv1_1', 'conv1_2'], 'pool1', ['conv2_1', 'conv2_2'], 'pool2', - ['conv3_1', 'conv3_2', 'conv3_3', 'conv3_4'], 'pool3', ['conv4_1', 'conv4_2', 'conv4_3', 'conv4_4'], 'pool4', - ['conv5_1', 'conv5_2', 'conv5_3', 'conv5_4'], 'pool5', 'flatten', 'fc1_relu', 'fc2_relu', 'outputs' -] - -cfg = { - 'A': [[64], 'M', [128], 'M', [256, 256], 'M', [512, 512], 'M', [512, 512], 'M', 'F', 'fc1', 'fc2', 'O'], - 'B': [[64, 64], 'M', [128, 128], 'M', [256, 256], 'M', [512, 512], 'M', [512, 512], 'M', 'F', 'fc1', 'fc2', 'O'], - 'D': - [ - [64, 64], 'M', [128, 128], 'M', [256, 256, 256], 'M', [512, 512, 512], 'M', [512, 512, 512], 'M', 'F', - 'fc1', 'fc2', 'O' - ], - 'E': - [ - [64, 64], 'M', [128, 128], 'M', [256, 256, 256, 256], 'M', [512, 512, 512, 512], 'M', [512, 512, 512, 512], - 'M', 'F', 'fc1', 'fc2', 'O' - ], -} - -mapped_cfg = { - 'vgg11': 'A', - 'vgg11_bn': 'A', - 'vgg13': 'B', - 'vgg13_bn': 'B', - 'vgg16': 'D', - 'vgg16_bn': 'D', - 'vgg19': 'E', - 'vgg19_bn': 'E' -} - -model_urls = { - 'vgg16': 'http://www.cs.toronto.edu/~frossard/vgg16/', - 'vgg19': 'https://media.githubusercontent.com/media/tensorlayer/pretrained-models/master/models/' -} - -model_saved_name = {'vgg16': 'vgg16_weights.npz', 'vgg19': 'vgg19.npy'} - - -class VGG(Model): - - def __init__(self, layer_type, batch_norm=False, end_with='outputs', name=None): - super(VGG, self).__init__(name=name) - self.end_with = end_with - - config = cfg[mapped_cfg[layer_type]] - self.layers = make_layers(config, batch_norm, end_with) - - def forward(self, inputs): - """ - inputs : tensor - Shape [None, 224, 224, 3], value range [0, 1]. 
- """ - - inputs = inputs * 255 - np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape([1, 1, 1, 3]) - - out = self.layers.forward(inputs) - return out - - -def make_layers(config, batch_norm=False, end_with='outputs'): - layer_list = [] - is_end = False - for layer_group_idx, layer_group in enumerate(config): - if isinstance(layer_group, list): - for idx, layer in enumerate(layer_group): - layer_name = layer_names[layer_group_idx][idx] - n_filter = layer - if idx == 0: - if layer_group_idx > 0: - in_channels = config[layer_group_idx - 2][-1] - else: - in_channels = 3 - else: - in_channels = layer_group[idx - 1] - layer_list.append( - Conv2d( - n_filter=n_filter, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', - in_channels=in_channels, name=layer_name - ) - ) - if batch_norm: - layer_list.append(BatchNorm()) - if layer_name == end_with: - is_end = True - break - else: - layer_name = layer_names[layer_group_idx] - if layer_group == 'M': - layer_list.append(MaxPool2d(filter_size=(2, 2), strides=(2, 2), padding='SAME', name=layer_name)) - elif layer_group == 'O': - layer_list.append(Dense(n_units=1000, in_channels=4096, name=layer_name)) - elif layer_group == 'F': - layer_list.append(Flatten(name='flatten')) - elif layer_group == 'fc1': - layer_list.append(Dense(n_units=4096, act=tf.nn.relu, in_channels=512 * 7 * 7, name=layer_name)) - elif layer_group == 'fc2': - layer_list.append(Dense(n_units=4096, act=tf.nn.relu, in_channels=4096, name=layer_name)) - if layer_name == end_with: - is_end = True - if is_end: - break - return LayerList(layer_list) - - -def restore_model(model, layer_type): - logging.info("Restore pre-trained weights") - # download weights - maybe_download_and_extract(model_saved_name[layer_type], 'models', model_urls[layer_type]) - weights = [] - if layer_type == 'vgg16': - npz = np.load(os.path.join('models', model_saved_name[layer_type]), allow_pickle=True) - # get weight list - for val in sorted(npz.items()): - logging.info(" Loading weights %s in %s" % (str(val[1].shape), val[0])) - weights.append(val[1]) - if len(model.all_weights) == len(weights): - break - elif layer_type == 'vgg19': - npz = np.load(os.path.join('models', model_saved_name[layer_type]), allow_pickle=True, encoding='latin1').item() - # get weight list - for val in sorted(npz.items()): - logging.info(" Loading %s in %s" % (str(val[1][0].shape), val[0])) - logging.info(" Loading %s in %s" % (str(val[1][1].shape), val[0])) - weights.extend(val[1]) - if len(model.all_weights) == len(weights): - break - # assign weight values - assign_weights(weights, model) - del weights - - -def VGG_static(layer_type, batch_norm=False, end_with='outputs', name=None): - ni = Input([None, 224, 224, 3]) - n = Lambda( - lambda x: x * 255 - np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape([1, 1, 1, 3]), name='scale' - )(ni) - - config = cfg[mapped_cfg[layer_type]] - layers = make_layers(config, batch_norm, end_with) - - nn = layers(n) - - M = Model(inputs=ni, outputs=nn, name=name) - return M - - -def vgg16(pretrained=False, end_with='outputs', mode='dynamic', name=None): - """Pre-trained VGG16 model. - - Parameters - ------------ - pretrained : boolean - Whether to load pretrained weights. Default False. - end_with : str - The end point of the model. Default ``fc3_relu`` i.e. the whole model. - mode : str. - Model building mode, 'dynamic' or 'static'. Default 'dynamic'. - name : None or str - A unique layer name. 
-
-    Examples
-    ---------
-    Classify ImageNet classes with VGG16, see `tutorial_models_vgg.py `__
-    With TensorLayer
-
-    >>> # get the whole model, without pre-trained VGG parameters
-    >>> vgg = tl.models.vgg16()
-    >>> # get the whole model, restore pre-trained VGG parameters
-    >>> vgg = tl.models.vgg16(pretrained=True)
-    >>> # use for inference
-    >>> output = vgg(img, is_train=False)
-    >>> probs = tf.nn.softmax(output)[0].numpy()
-
-    Extract features with VGG16 and train a classifier with 100 classes
-
-    >>> # get VGG without the last layer
-    >>> cnn = tl.models.vgg16(end_with='fc2_relu', mode='static').as_layer()
-    >>> # add one more layer and build a new model
-    >>> ni = Input([None, 224, 224, 3], name="inputs")
-    >>> nn = cnn(ni)
-    >>> nn = tl.layers.Dense(n_units=100, name='out')(nn)
-    >>> model = tl.models.Model(inputs=ni, outputs=nn)
-    >>> # train your own classifier (only update the last layer)
-    >>> train_params = model.get_layer('out').trainable_weights
-
-    Reuse model
-
-    >>> # in dynamic mode, we can directly reuse the same model
-    >>> # in static mode
-    >>> vgg_layer = tl.models.vgg16().as_layer()
-    >>> ni_1 = tl.layers.Input([None, 224, 224, 3])
-    >>> ni_2 = tl.layers.Input([None, 224, 224, 3])
-    >>> a_1 = vgg_layer(ni_1)
-    >>> a_2 = vgg_layer(ni_2)
-    >>> M = Model(inputs=[ni_1, ni_2], outputs=[a_1, a_2])
-
-    """
-    if mode == 'dynamic':
-        model = VGG(layer_type='vgg16', batch_norm=False, end_with=end_with, name=name)
-    elif mode == 'static':
-        model = VGG_static(layer_type='vgg16', batch_norm=False, end_with=end_with, name=name)
-    else:
-        raise Exception("No such mode %s" % mode)
-    if pretrained:
-        restore_model(model, layer_type='vgg16')
-    return model
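Both builders prepend the same preprocessing: inputs in [0, 1] are rescaled to [0, 255] and the ImageNet channel means are subtracted. What the scale Lambda does, checked numerically (NumPy, illustrative only):

    import numpy as np

    mean = np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape([1, 1, 1, 3])
    img = np.full((1, 224, 224, 3), 0.5, dtype=np.float32)  # mid-grey input in [0, 1]
    pre = img * 255 - mean
    print(pre[0, 0, 0])  # [ 3.82  10.721 23.561]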
-
-
-def vgg19(pretrained=False, end_with='outputs', mode='dynamic', name=None):
-    """Pre-trained VGG19 model.
-
-    Parameters
-    ------------
-    pretrained : boolean
-        Whether to load pretrained weights. Default False.
-    end_with : str
-        The end point of the model. Default ``outputs`` i.e. the whole model.
-    mode : str
-        Model building mode, 'dynamic' or 'static'. Default 'dynamic'.
-    name : None or str
-        A unique layer name.
-
-    Examples
-    ---------
-    Classify ImageNet classes with VGG19, see `tutorial_models_vgg.py `__
-    With TensorLayer
-
-    >>> # get the whole model, without pre-trained VGG parameters
-    >>> vgg = tl.models.vgg19()
-    >>> # get the whole model, restore pre-trained VGG parameters
-    >>> vgg = tl.models.vgg19(pretrained=True)
-    >>> # use for inference
-    >>> output = vgg(img, is_train=False)
-    >>> probs = tf.nn.softmax(output)[0].numpy()
-
-    Extract features with VGG19 and train a classifier with 100 classes
-
-    >>> # get VGG without the last layer
-    >>> cnn = tl.models.vgg19(end_with='fc2_relu', mode='static').as_layer()
-    >>> # add one more layer and build a new model
-    >>> ni = Input([None, 224, 224, 3], name="inputs")
-    >>> nn = cnn(ni)
-    >>> nn = tl.layers.Dense(n_units=100, name='out')(nn)
-    >>> model = tl.models.Model(inputs=ni, outputs=nn)
-    >>> # train your own classifier (only update the last layer)
-    >>> train_params = model.get_layer('out').trainable_weights
-
-    Reuse model
-
-    >>> # in dynamic mode, we can directly reuse the same model
-    >>> # in static mode
-    >>> vgg_layer = tl.models.vgg19().as_layer()
-    >>> ni_1 = tl.layers.Input([None, 224, 224, 3])
-    >>> ni_2 = tl.layers.Input([None, 224, 224, 3])
-    >>> a_1 = vgg_layer(ni_1)
-    >>> a_2 = vgg_layer(ni_2)
-    >>> M = Model(inputs=[ni_1, ni_2], outputs=[a_1, a_2])
-
-    """
-    if mode == 'dynamic':
-        model = VGG(layer_type='vgg19', batch_norm=False, end_with=end_with, name=name)
-    elif mode == 'static':
-        model = VGG_static(layer_type='vgg19', batch_norm=False, end_with=end_with, name=name)
-    else:
-        raise Exception("No such mode %s" % mode)
-    if pretrained:
-        restore_model(model, layer_type='vgg19')
-    return model
-
-
-VGG16 = vgg16
-VGG19 = vgg19
-
-# models without pretrained parameters
-# def vgg11(pretrained=False, end_with='outputs'):
-#     model = VGG(layer_type='vgg11', batch_norm=False, end_with=end_with)
-#     if pretrained:
-#         model.restore_weights()
-#     return model
-#
-#
-# def vgg11_bn(pretrained=False, end_with='outputs'):
-#     model = VGG(layer_type='vgg11_bn', batch_norm=True, end_with=end_with)
-#     if pretrained:
-#         model.restore_weights()
-#     return model
-#
-#
-# def vgg13(pretrained=False, end_with='outputs'):
-#     model = VGG(layer_type='vgg13', batch_norm=False, end_with=end_with)
-#     if pretrained:
-#         model.restore_weights()
-#     return model
-#
-#
-# def vgg13_bn(pretrained=False, end_with='outputs'):
-#     model = VGG(layer_type='vgg13_bn', batch_norm=True, end_with=end_with)
-#     if pretrained:
-#         model.restore_weights()
-#     return model
-#
-#
-# def vgg16_bn(pretrained=False, end_with='outputs'):
-#     model = VGG(layer_type='vgg16_bn', batch_norm=True, end_with=end_with)
-#     if pretrained:
-#         model.restore_weights()
-#     return model
-#
-#
-# def vgg19_bn(pretrained=False, end_with='outputs'):
-#     model = VGG(layer_type='vgg19_bn', batch_norm=True, end_with=end_with)
-#     if pretrained:
-#         model.restore_weights()
-#     return model
diff --git a/tensorlayer/optimizers/__init__.py b/tensorlayer/optimizers/__init__.py
index e74b38801..ffe9995da 100644
--- a/tensorlayer/optimizers/__init__.py
+++ b/tensorlayer/optimizers/__init__.py
@@ -5,8 +5,21 @@
 various benchmarks and domain-specific problems. In addition, we also
 support transparent access to native TensorFlow parameters.
 For example, we provide not only layers for local response normalization, but also
-layers that allow user to apply ``tf.nn.lrn`` on ``network.outputs``.
+layers that allow user to apply ``tf.ops.lrn`` on ``network.outputs``.
More functions can be found in `TensorFlow API `__. """ from .amsgrad import AMSGrad + +# ['Adadelta', 'Adagrad', 'Adam', 'Admax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS'] +from .load_optimizers_backend import Adadelta +from .load_optimizers_backend import Adagrad +from .load_optimizers_backend import Adam +from .load_optimizers_backend import Admax +from .load_optimizers_backend import Ftrl +from .load_optimizers_backend import Nadam +from .load_optimizers_backend import RMSprop +from .load_optimizers_backend import SGD +from .load_optimizers_backend import Momentum +from .load_optimizers_backend import Lamb +from .load_optimizers_backend import LARS diff --git a/tensorlayer/optimizers/dragon_optimizers.py b/tensorlayer/optimizers/dragon_optimizers.py new file mode 100644 index 000000000..523e785f8 --- /dev/null +++ b/tensorlayer/optimizers/dragon_optimizers.py @@ -0,0 +1,56 @@ +from __future__ import absolute_import, division, print_function +import dragon as dg + +__all__ = ['Adadelta', 'Adagrad', 'Adam', 'Admax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS'] + +# Add module aliases + + +# learning_rate=0.001, rho=0.95, epsilon=1e-07, name='Adadelta' +def Adadelta(**kwargs): + raise NotImplementedError('Adadelta optimizer function not implemented') + + +# learning_rate=0.001, initial_accumulator_value=0.1, epsilon=1e-07,name='Adagrad' +def Adagrad(**kwargs): + raise NotImplementedError('Adagrad optimizer function not implemented') + + +# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False,name='Adam' +Adam = dg.optimizers.Adam + + +# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Adamax' +def Admax(**kwargs): + raise NotImplementedError('Admax optimizer function not implemented') + + +# learning_rate=0.001, learning_rate_power=-0.5, initial_accumulator_value=0.1, +# l1_regularization_strength=0.0, l2_regularization_strength=0.0, name='Ftrl',l2_shrinkage_regularization_strength=0.0 +def Ftrl(**kwargs): + raise NotImplementedError('Ftrl optimizer function not implemented') + + +# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam', +def Nadam(**kwargs): + raise NotImplementedError('Nadam optimizer function not implemented') + + +# learning_rate=0.001, rho=0.9, momentum=0.0, epsilon=1e-07, centered=False,name='RMSprop' +RMSprop = dg.optimizers.RMSprop + +# learning_rate=0.01, momentum=0.0, nesterov=False, name='SGD' +SGD = dg.optimizers.SGD + + +# learning_rate, momentum, use_locking=False, name='Momentum', use_nesterov=False +def Momentum(**kwargs): + raise NotImplementedError('Momentum optimizer function not implemented') + + +def Lamb(**kwargs): + raise NotImplementedError('Lamb optimizer function not implemented') + + +def LARS(**kwargs): + raise NotImplementedError('LARS optimizer function not implemented') diff --git a/tensorlayer/optimizers/load_optimizers_backend.py b/tensorlayer/optimizers/load_optimizers_backend.py new file mode 100644 index 000000000..478f61fb5 --- /dev/null +++ b/tensorlayer/optimizers/load_optimizers_backend.py @@ -0,0 +1,14 @@ +#! 
/usr/bin/python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, division, print_function
+from tensorlayer.backend.ops.load_backend import BACKEND
+
+if BACKEND == 'tensorflow':
+    from .tensorflow_optimizer import *
+elif BACKEND == 'mindspore':
+    from .mindspore_optimizer import *
+elif BACKEND == 'dragon':
+    from .dragon_optimizers import *
+else:
+    raise NotImplementedError("This backend is not supported")
diff --git a/tensorlayer/optimizers/mindspore_optimizer.py b/tensorlayer/optimizers/mindspore_optimizer.py
new file mode 100644
index 000000000..cb0b41107
--- /dev/null
+++ b/tensorlayer/optimizers/mindspore_optimizer.py
@@ -0,0 +1,158 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, division, print_function
+from mindspore.nn import optim as optimizer
+import mindspore as ms
+from mindspore.nn import Cell
+
+__all__ = ['Adadelta', 'Adagrad', 'Adam', 'Admax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS']
+
+
+class Adadelta(Cell):
+
+    def __init__(self):
+        pass
+
+    def apply_gradients(self):
+        raise NotImplementedError('Adadelta optimizer function not implemented')
+
+
+class Adagrad(Cell):
+
+    def __init__(self):
+        pass
+
+    def apply_gradients(self):
+        raise NotImplementedError('Adagrad optimizer function not implemented')
+
+
+class Adam(Cell):
+
+    def __init__(
+        self,
+        lr=0.001,
+        beta_1=0.9,
+        beta_2=0.999,
+        epsilon=1e-8,
+    ):
+        super(Adam, self).__init__()  # Cell must be initialised before attributes are set
+        self.adam = optimizer.Adam
+        self.learn_rate = lr
+        self.beta_1 = beta_1
+        self.beta_2 = beta_2
+        self.epsilon = epsilon
+
+    def apply_gradients(self, grads_and_vars):
+        grads, vars = list(zip(*grads_and_vars))
+        optimizer_adam = self.adam(
+            vars, learning_rate=self.learn_rate, beta1=self.beta_1, beta2=self.beta_2, eps=self.epsilon
+        )
+        optimizer_adam(grads)
+
+
+class Admax(Cell):
+
+    def __init__(self):
+        pass
+
+    def apply_gradients(self):
+        raise NotImplementedError('Admax optimizer function not implemented')
+
+
+class Ftrl(Cell):
+
+    def __init__(self):
+        pass
+
+    def apply_gradients(self):
+        raise NotImplementedError('Ftrl optimizer function not implemented')
+
+
+class Nadam(Cell):
+
+    def __init__(self):
+        pass
+
+    def apply_gradients(self):
+        raise NotImplementedError('Nadam optimizer function not implemented')
+
+
+class RMSprop(Cell):
+
+    def __init__(self):
+        pass
+
+    def apply_gradients(self):
+        raise NotImplementedError('RMSprop optimizer function not implemented')
+
+
+class SGD(Cell):
+
+    def __init__(self, learning_rate, momentum):
+        super(SGD, self).__init__()
+        self.sgd = optimizer.SGD
+        self.learn_rate = learning_rate
+        self.momentum = momentum
+
+    def apply_gradients(self, grads_and_vars):
+        grads, vars = list(zip(*grads_and_vars))
+        optimizer_sgd = self.sgd(vars, learning_rate=self.learn_rate, momentum=self.momentum)
+        optimizer_sgd(grads)
+
+
+class Momentum(Cell):
+
+    def __init__(self, learning_rate, momentum):
+        super(Momentum, self).__init__()
+        self.mom = optimizer.Momentum
+        self.learn_rate = learning_rate
+        self.momentum = momentum
+
+    def apply_gradients(self, grads_and_vars, **kwargs):
+        grads, vars = list(zip(*grads_and_vars))
+        optimizer_mom = self.mom(vars, learning_rate=self.learn_rate, momentum=self.momentum, **kwargs)
+        optimizer_mom(grads)
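Every wrapper above funnels through the same pattern: split the (gradient, parameter) pairs back into parallel tuples, construct the native MindSpore optimizer over the parameters, and apply the gradients. Note that the optimizer object is rebuilt on every apply_gradients call, so stateful quantities such as Adam's moment estimates do not persist between steps. The unpacking idiom itself, in isolation (plain Python, stand-in values):

    grads_and_vars = [('grad0', 'param0'), ('grad1', 'param1')]  # stand-ins for (Tensor, Parameter) pairs
    grads, params = list(zip(*grads_and_vars))
    print(grads)   # ('grad0', 'grad1')
    print(params)  # ('param0', 'param1')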
+class Lamb(object):
+
+    def __init__(
+        self, decay_steps, warmup_steps=0, start_learning_rate=0.1, end_learning_rate=0.0001, power=1.0, beta1=0.9,
+        beta2=0.999, eps=1e-06, weight_decay=0.0
+    ):
+        self.lamb = optimizer.Lamb
+        self.decay_steps = decay_steps
+        self.warmup_steps = warmup_steps
+        self.start_learning_rate = start_learning_rate
+        self.end_learning_rate = end_learning_rate
+        self.power = power
+        self.beta1 = beta1
+        self.beta2 = beta2
+        self.eps = eps
+        self.weight_decay = weight_decay
+
+    def apply_gradients(self, grads_and_vars):
+        grads, vars = list(zip(*grads_and_vars))
+        optimizer_lamb = self.lamb(
+            params=vars, decay_steps=self.decay_steps, warmup_steps=self.warmup_steps,
+            start_learning_rate=self.start_learning_rate, end_learning_rate=self.end_learning_rate, power=self.power,
+            beta1=self.beta1, beta2=self.beta2, eps=self.eps, weight_decay=self.weight_decay
+        )
+        optimizer_lamb(grads)
+
+
+class LARS(object):
+
+    def __init__(self, optimizer, **kwargs):
+        self.lars = ms.nn.LARS(optimizer=optimizer, **kwargs)
+
+    def apply_gradients(self, grads_and_vars):
+        grads, _ = list(zip(*grads_and_vars))
+        self.lars(grads)
diff --git a/tensorlayer/optimizers/tensorflow_optimizer.py b/tensorlayer/optimizers/tensorflow_optimizer.py
new file mode 100644
index 000000000..0cae4cc8a
--- /dev/null
+++ b/tensorlayer/optimizers/tensorflow_optimizer.py
@@ -0,0 +1,45 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, division, print_function
+import tensorflow as tf
+
+__all__ = ['Adadelta', 'Adagrad', 'Adam', 'Admax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS']
+
+# Add module aliases
+
+# learning_rate=0.001, rho=0.95, epsilon=1e-07, name='Adadelta'
+Adadelta = tf.optimizers.Adadelta
+
+# learning_rate=0.001, initial_accumulator_value=0.1, epsilon=1e-07, name='Adagrad'
+Adagrad = tf.optimizers.Adagrad
+
+# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False, name='Adam'
+Adam = tf.optimizers.Adam
+
+# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Adamax'
+Admax = tf.optimizers.Adamax
+
+# learning_rate=0.001, learning_rate_power=-0.5, initial_accumulator_value=0.1,
+# l1_regularization_strength=0.0, l2_regularization_strength=0.0, name='Ftrl', l2_shrinkage_regularization_strength=0.0
+Ftrl = tf.optimizers.Ftrl
+
+# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam'
+Nadam = tf.optimizers.Nadam
+
+# learning_rate=0.001, rho=0.9, momentum=0.0, epsilon=1e-07, centered=False, name='RMSprop'
+RMSprop = tf.optimizers.RMSprop
+
+# learning_rate=0.01, momentum=0.0, nesterov=False, name='SGD'
+SGD = tf.optimizers.SGD
+
+# learning_rate, momentum, use_locking=False, name='Momentum', use_nesterov=False
+Momentum = tf.compat.v1.train.MomentumOptimizer
+
+
+def Lamb(**kwargs):
+    raise NotImplementedError('Lamb optimizer function not implemented')
+
+
+def LARS(**kwargs):
+    raise NotImplementedError('LARS optimizer function not implemented')
diff --git a/tensorlayer/package_info.py b/tensorlayer/package_info.py
index e21969abd..de5a88430 100644
--- a/tensorlayer/package_info.py
+++ b/tensorlayer/package_info.py
@@ -4,7 +4,7 @@
 MAJOR = 2
 MINOR = 2
-PATCH = 3
+PATCH = 0
 PRE_RELEASE = ''
 # Use the following formatting: (major, minor, patch, prerelease)
 VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
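With load_optimizers_backend.py in place, user code imports optimizers from tensorlayer and the active TL_BACKEND decides which implementation gets constructed. A hypothetical call site, mirroring the MNIST tutorial updated later in this patch (illustrative only):

    import os
    os.environ['TL_BACKEND'] = 'tensorflow'  # or 'mindspore' / 'dragon'

    import tensorlayer as tl

    optimizer = tl.optimizers.Momentum(0.05, 0.9)
    # in a custom training loop one would then call, for example:
    #   optimizer.apply_gradients(zip(grads, model.trainable_weights))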
act=tf.ops.relu, name='relu1') >>> network = DenseLayer(network, n_units=3, name='out') >>> probs = network.outputs - >>> sampling_prob = tf.nn.softmax(probs) + >>> sampling_prob = tf.ops.softmax(probs) >>> actions_batch_pl = tf.placeholder(tf.int32, shape=[None]) >>> discount_rewards_batch_pl = tf.placeholder(tf.float32, shape=[None]) >>> loss = tl.rein.cross_entropy_reward_loss(probs, actions_batch_pl, discount_rewards_batch_pl) From 4d352201a5c39f6d139d1c1bdf4f063acbf773f1 Mon Sep 17 00:00:00 2001 From: Eric Lai Date: Thu, 17 Dec 2020 11:12:21 +0800 Subject: [PATCH 02/36] yapf --- .../basic_tutorials/tutorial_mnist_simple.py | 12 +- tensorlayer/__init__.py | 1 + tensorlayer/backend/ops/__init__.py | 7 +- tensorlayer/backend/ops/dragon_backend.py | 1 + tensorlayer/backend/ops/load_backend.py | 3 +- tensorlayer/backend/ops/mindspore_backend.py | 1 + tensorlayer/backend/ops/mindspore_nn.py | 130 ++++- tensorlayer/backend/ops/tensorflow_backend.py | 1 + tensorlayer/backend/ops/tensorflow_nn.py | 220 +++++++++ tensorlayer/dataflow/__init__.py | 17 +- tensorlayer/dataflow/base.py | 18 - tensorlayer/dataflow/common.py | 34 -- tensorlayer/dataflow/load_data_backend.py | 9 - tensorlayer/dataflow/mindspore_data.py | 266 ++++++++++- tensorlayer/dataflow/mindspore_image.py | 305 ++++++++++++ tensorlayer/dataflow/tensorflow_data.py | 236 ++++++++- tensorlayer/dataflow/tensorflow_image.py | 200 ++++++++ tensorlayer/layers/activation.py | 1 + tensorlayer/layers/convolution/__init__.py | 5 +- .../layers/convolution/simplified_conv.py | 449 +++++++++++++++++- tensorlayer/models/core.py | 2 - 21 files changed, 1812 insertions(+), 106 deletions(-) delete mode 100644 tensorlayer/dataflow/base.py delete mode 100644 tensorlayer/dataflow/common.py delete mode 100644 tensorlayer/dataflow/load_data_backend.py create mode 100644 tensorlayer/dataflow/mindspore_image.py create mode 100644 tensorlayer/dataflow/tensorflow_image.py diff --git a/examples/basic_tutorials/tutorial_mnist_simple.py b/examples/basic_tutorials/tutorial_mnist_simple.py index ad0c4685b..e55f67e63 100644 --- a/examples/basic_tutorials/tutorial_mnist_simple.py +++ b/examples/basic_tutorials/tutorial_mnist_simple.py @@ -7,6 +7,7 @@ os.environ['TL_BACKEND'] = 'tensorflow' # os.environ['TL_BACKEND'] = 'mindspore' + import tensorflow as tf import tensorlayer as tl from tensorlayer.layers import Module @@ -45,7 +46,7 @@ def generator_train(): if len(inputs) != len(targets): raise AssertionError("The length of inputs and targets should be equal") for _input, _target in zip(inputs, targets): - yield _input, _target + yield (_input, np.array(_target)) MLP = CustomModel() @@ -57,9 +58,12 @@ def generator_train(): train_weights = MLP.trainable_weights optimizer = tl.optimizers.Momentum(0.05, 0.9) -train_ds = tf.data.Dataset.from_generator(generator_train, output_types=(tf.float32, tf.int32)) -train_ds = train_ds.shuffle(shuffle_buffer_size) -train_ds = train_ds.batch(batch_size) +train_ds = tl.dataflow.FromGenerator( + generator_train, output_types=(tl.float32, tl.int32) , column_names=['data', 'label'] +) +train_ds = tl.dataflow.Shuffle(train_ds,shuffle_buffer_size) +train_ds = tl.dataflow.Batch(train_ds,batch_size) + model = tl.models.Model(network=MLP, loss_fn=tl.cost.cross_entropy, optimizer=optimizer) model.train(n_epoch=n_epoch, train_dataset=train_ds, print_freq=print_freq, print_train_batch=False) diff --git a/tensorlayer/__init__.py b/tensorlayer/__init__.py index 3fab2fb2f..7d543b20a 100644 --- a/tensorlayer/__init__.py +++ 
b/tensorlayer/__init__.py @@ -50,6 +50,7 @@ from tensorlayer import optimizers from tensorlayer import rein from tensorlayer import utils + from tensorlayer import dataflow from tensorlayer.lazy_imports import LazyImport diff --git a/tensorlayer/backend/ops/__init__.py b/tensorlayer/backend/ops/__init__.py index b49fe7326..53ce221e5 100644 --- a/tensorlayer/backend/ops/__init__.py +++ b/tensorlayer/backend/ops/__init__.py @@ -27,9 +27,9 @@ from .load_backend import avg_pool3d from .load_backend import pool from .load_backend import depthwise_conv2d -from .load_backend import conv1d_transpose -from .load_backend import conv2d_transpose -from .load_backend import conv3d_transpose +from .load_backend import Conv1d_transpose +from .load_backend import Conv2d_transpose +from .load_backend import Conv3d_transpose from .load_backend import ReLU from .load_backend import ReLU6 @@ -113,3 +113,4 @@ from .load_backend import Unstack from .load_backend import Sign from .load_backend import Resize + diff --git a/tensorlayer/backend/ops/dragon_backend.py b/tensorlayer/backend/ops/dragon_backend.py index 2b7b8a03b..821c63996 100644 --- a/tensorlayer/backend/ops/dragon_backend.py +++ b/tensorlayer/backend/ops/dragon_backend.py @@ -926,6 +926,7 @@ def __call__(self, *args, **kwargs): pass + class Resize: def __init__(self, scale, method, antialias=False, data_format='channels_last', ksize=None): diff --git a/tensorlayer/backend/ops/load_backend.py b/tensorlayer/backend/ops/load_backend.py index 7f0e25332..ed4e48062 100644 --- a/tensorlayer/backend/ops/load_backend.py +++ b/tensorlayer/backend/ops/load_backend.py @@ -57,7 +57,8 @@ import mindspore.context as context import os os.environ['DEVICE_ID'] = '0' - context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU') + #context.set_context(mode=context.PYNATIVE_MODE,device_target='GPU'), + context.set_context(mode=context.GRAPH_MODE, device_target='CPU'), # enable_task_sink=True, enable_loop_sink=True) # context.set_context(mode=context.GRAPH_MODE, backend_policy='ms', # device_target='Ascend', enable_task_sink=True, enable_loop_sink=True) diff --git a/tensorlayer/backend/ops/mindspore_backend.py b/tensorlayer/backend/ops/mindspore_backend.py index d54a4c70f..6e26403ce 100644 --- a/tensorlayer/backend/ops/mindspore_backend.py +++ b/tensorlayer/backend/ops/mindspore_backend.py @@ -830,6 +830,7 @@ def construct(self, input, multiples): return self.tile(input, tuple(multiples)) + def tile(input, multiples): """ Constructs a tensor by tiling a given tensor. diff --git a/tensorlayer/backend/ops/mindspore_nn.py b/tensorlayer/backend/ops/mindspore_nn.py index 73e259f08..95c0ed156 100644 --- a/tensorlayer/backend/ops/mindspore_nn.py +++ b/tensorlayer/backend/ops/mindspore_nn.py @@ -14,6 +14,7 @@ from mindspore._extends import cell_attr_register + def padding_format(padding): """ Checks that the padding format correspond format. 
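Everything in `tensorlayer.backend`, `tensorlayer.dataflow` and `tensorlayer.optimizers` now dispatches on the same BACKEND constant read in load_backend, so the backend must be chosen through the TL_BACKEND environment variable before the first import of tensorlayer. A minimal sketch, assuming only the variable name and the `tl.BACKEND` constant that appear in this patch:

    import os

    os.environ['TL_BACKEND'] = 'tensorflow'  # or 'mindspore' / 'dragon'

    import tensorlayer as tl

    print(tl.BACKEND)  # ops, dataflow and optimizers now resolve to this backend
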
@@ -459,8 +460,35 @@ def bias_add(x, bias): class Conv1D(Cell): - pass - # raise NotImplementedError + + def __init__(self, stride, padding, data_format='NWC', dilations=None, out_channel=None, k_size=None): + super(Conv1D, self).__init__() + self.data_format, self.padding = preprocess_1d_format(data_format, padding) + self.stride = (1, stride) + self.dilations = (1, dilations) + self.k_size = (1, k_size) + self.out_channel = out_channel + + self.conv2d = P.Conv2D( + out_channel=self.out_channel, kernel_size=self.k_size, pad_mode=self.padding, stride=self.stride, + dilation=self.dilations, mode=1, group=1 + ) + + self.expand_dims = P.ExpandDims() + self.squeeze = P.Squeeze(2) + self.shape = P.Shape() + + def construct(self, x, filters): + if self.data_format == 'NWC': + x = nhwc_to_nchw(x) + + x = self.expand_dims(x, 2) + filters = self.expand_dims(filters, 2) + + output = self.conv2d(x, filters) + output = self.squeeze(output) + + return output def conv1d(input, filters, stride, padding, data_format='NWC', dilations=None, name=None): @@ -843,6 +871,7 @@ def pool(input, window_shape, pooling_type, strides=None, padding='VALID', data_ pass + class DepthwiseConv2d(Cell): def __init__(self, strides, padding, data_format=None, dilations=None, ksize=None, channel_multiplier=1): @@ -894,6 +923,51 @@ def depthwise_conv2d(input, filter, strides, padding, data_format=None, dilation pass +class Conv1d_transpose(Cell): + + def __init__(self, strides, padding, data_format, dilations=None, out_channel=None, k_size=None, in_channels=None): + super(Conv1d_transpose, self).__init__() + self.data_format, self.padding = preprocess_1d_format(data_format, padding) + self.in_channels = in_channels + self.out_channel = out_channel + self.strides = (1, strides) + self.dilations = (1, dilations) + self.k_size = (1, k_size) + + self.conv2d_transpose = P.Conv2DBackpropInput( + out_channel=self.in_channels, kernel_size=self.k_size, pad_mode=self.padding, stride=self.strides, + dilation=self.dilations, mode=1, group=1 + ) + self.shape = P.Shape() + self.expand_dims = P.ExpandDims() + self.squeeze = P.Squeeze(2) + + def _deconv_output_length(self, input_length, filter_size, stride_size, dilation_size): + length = 0 + filter_size = filter_size + (filter_size - 1) * (dilation_size - 1) + + if self.padding == 'same': + length = input_length * stride_size + elif self.padding == 'valid': + length = input_length * stride_size + max(filter_size - stride_size, 0) + + return length + + def construct(self, x, filters): + if self.data_format == 'NWC': + x = nhwc_to_nchw(x) + x = self.expand_dims(x, 2) + filters = self.expand_dims(filters, 2) + n, _, h, w = self.shape(x) + + h_out = self._deconv_output_length(h, self.k_size[0], self.strides[0], self.dilations[0]) + w_out = self._deconv_output_length(w, self.k_size[1], self.strides[1], self.dilations[1]) + output = self.conv2d_transpose(x, filters, (n, self.out_channel, h_out, w_out)) + output = self.squeeze(output) + + return output + + def conv1d_transpose( input, filters, output_shape, strides, padding='SAME', data_format='NWC', dilations=None, name=None ): @@ -931,6 +1005,53 @@ def conv1d_transpose( pass +class Conv2d_transpose(Cell): + + def __init__(self, strides, padding, data_format, dilations=None, out_channel=None, k_size=None, in_channels=None): + super(Conv2d_transpose, self).__init__() + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + self.in_channels = in_channels + self.out_channel = out_channel + + self.k_size = k_size + if 
self.data_format == 'NHWC': + self.strides = (strides[1], strides[2]) + self.dilations = (dilations[1], dilations[2]) + elif self.data_format == 'NCHW': + self.strides = (strides[2], strides[3]) + self.dilations = (dilations[2], dilations[3]) + + self.conv2d_transpose = P.Conv2DBackpropInput( + out_channel=self.in_channels, kernel_size=self.k_size, pad_mode=self.padding, stride=self.strides, + dilation=self.dilations, mode=1, group=1 + ) + self.shape = P.Shape() + + def _deconv_output_length(self, input_length, filter_size, stride_size, dilation_size): + length = 0 + filter_size = filter_size + (filter_size - 1) * (dilation_size - 1) + + if self.padding == 'same': + length = input_length * stride_size + elif self.padding == 'valid': + length = input_length * stride_size + max(filter_size - stride_size, 0) + + return length + + def construct(self, x, filters): + if self.data_format == 'NHWC': + x = nhwc_to_nchw(x) + + n, _, h, w = self.shape(x) + + h_out = self._deconv_output_length(h, self.k_size[0], self.strides[0], self.dilations[0]) + w_out = self._deconv_output_length(w, self.k_size[1], self.strides[1], self.dilations[1]) + + output = self.conv2d_transpose(x, filters, (n, self.out_channel, h_out, w_out)) + + return output + + def conv2d_transpose( input, filters, output_shape, strides, padding='SAME', data_format='NHWC', dilations=None, name=None ): @@ -968,6 +1089,10 @@ def conv2d_transpose( pass +class Conv3d_transpose(Cell): + pass + + def conv3d_transpose( input, filters, output_shape, strides, padding='SAME', data_format='NDHWC', dilations=None, name=None ): @@ -1003,6 +1128,7 @@ def conv3d_transpose( pass + class BatchNorm(Cell): """Batch Normalization base class.""" diff --git a/tensorlayer/backend/ops/tensorflow_backend.py b/tensorlayer/backend/ops/tensorflow_backend.py index 2f381fb4d..91ce8343f 100644 --- a/tensorlayer/backend/ops/tensorflow_backend.py +++ b/tensorlayer/backend/ops/tensorflow_backend.py @@ -895,6 +895,7 @@ def __call__(self, input, axis=None): return tf.math.count_nonzero(input, axis=axis, keepdims=self.keepdims, dtype=self.dtype) + class Resize: def __init__(self, scale, method, antialias=False, data_format='channels_last', ksize=None): diff --git a/tensorlayer/backend/ops/tensorflow_nn.py b/tensorlayer/backend/ops/tensorflow_nn.py index 5e2d386c5..2fa178d04 100644 --- a/tensorlayer/backend/ops/tensorflow_nn.py +++ b/tensorlayer/backend/ops/tensorflow_nn.py @@ -950,6 +950,66 @@ def depthwise_conv2d(input, filter, strides, padding, data_format=None, dilation return outputs +class Conv1d_transpose(object): + + def __init__( + self, strides, padding, data_format='NWC', dilations=None, out_channel=None, k_size=None, in_channels=None + ): + self.strides = strides + self.dilations = dilations + self.data_format, self.padding = preprocess_1d_format(data_format, padding) + + def __call__(self, input, filters): + batch_size = input.shape[0] + if self.data_format == 'NWC': + w_axis, c_axis = 1, 2 + else: + w_axis, c_axis = 2, 1 + + input_shape = input.shape.as_list() + filters_shape = filters.shape.as_list() + input_w = input_shape[w_axis] + filters_w = filters_shape[0] + output_channels = filters_shape[1] + dilations_w = 1 + + if isinstance(self.strides, int): + strides_w = self.strides + else: + strides_list = list(self.strides) + strides_w = strides_list[w_axis] + + if self.dilations is not None: + if isinstance(self.dilations, int): + dilations_w = self.dilations + else: + dilations_list = list(self.dilations) + dilations_w = dilations_list[w_axis] + + filters_w = 
filters_w + (filters_w - 1) * (dilations_w - 1) + assert self.padding in {'SAME', 'VALID'} + if self.padding == 'VALID': + output_w = input_w * strides_w + max(filters_w - strides_w, 0) + elif self.padding == 'SAME': + output_w = input_w * strides_w + + if self.data_format == 'NCW': + output_shape = (batch_size, output_channels, output_w) + else: + output_shape = (batch_size, output_w, output_channels) + output_shape = tf.stack(output_shape) + outputs = tf.nn.conv1d_transpose( + input=input, + filters=filters, + output_shape=output_shape, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + dilations=self.dilations, + ) + return outputs + + def conv1d_transpose( input, filters, output_shape, strides, padding='SAME', data_format='NWC', dilations=None, name=None ): @@ -999,6 +1059,81 @@ def conv1d_transpose( return outputs +class Conv2d_transpose(object): + + def __init__( + self, strides, padding, data_format='NHWC', dilations=None, name=None, out_channel=None, k_size=None, + in_channels=None + ): + self.strides = strides + self.dilations = dilations + self.name = name + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + + def __call__(self, input, filters): + if self.data_format == 'NHWC': + h_axis, w_axis = 1, 2 + else: + h_axis, w_axis = 2, 3 + + input_shape = input.shape.as_list() + filters_shape = filters.shape.as_list() + batch_size = input.shape[0] + input_h, input_w = input_shape[h_axis], input_shape[w_axis] + kernel_h, kernel_w = filters_shape[0], filters_shape[1] + output_channels = filters_shape[2] + dilations_h, dilations_w = 1, 1 + + if isinstance(self.strides, int): + strides_h = self.strides + strides_w = self.strides + else: + strides_list = list(self.strides) + if len(strides_list) != 4: + strides_h = strides_list[0] + strides_w = strides_list[1] + else: + strides_h = strides_list[h_axis] + strides_w = strides_list[w_axis] + + if self.dilations is not None: + if isinstance(self.dilations, int): + dilations_h = self.dilations + dilations_w = self.dilations + else: + dilations_list = list(self.dilations) + if len(dilations_list) != 4: + dilations_h = dilations_list[0] + dilations_w = dilations_list[1] + else: + dilations_h = dilations_list[h_axis] + dilations_w = dilations_list[w_axis] + + kernel_h = kernel_h + (kernel_h - 1) * (dilations_h - 1) + kernel_w = kernel_w + (kernel_w - 1) * (dilations_w - 1) + + assert self.padding in {'SAME', 'VALID'} + if self.padding == 'VALID': + output_h = input_h * strides_h + max(kernel_h - strides_h, 0) + output_w = input_w * strides_w + max(kernel_w - strides_w, 0) + elif self.padding == 'SAME': + output_h = input_h * strides_h + output_w = input_w * strides_w + + if self.data_format == 'NCHW': + out_shape = (batch_size, output_channels, output_h, output_w) + else: + out_shape = (batch_size, output_h, output_w, output_channels) + + output_shape = tf.stack(out_shape) + + outputs = tf.nn.conv2d_transpose( + input=input, filters=filters, output_shape=output_shape, strides=self.strides, padding=self.padding, + data_format=self.data_format, dilations=self.dilations, name=self.name + ) + return outputs + + def conv2d_transpose( input, filters, output_shape, strides, padding='SAME', data_format='NHWC', dilations=None, name=None ): @@ -1048,6 +1183,91 @@ def conv2d_transpose( return outputs +class Conv3d_transpose(object): + + def __init__( + self, strides, padding, data_format='NDHWC', dilations=None, name=None, out_channel=None, k_size=None, + in_channels=None + ): + self.strides = strides 
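+        # `strides`/`dilations` may be a scalar or a full NDHWC-length list;
+        # __call__ below normalizes both forms before the output shape is
+        # computed.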
+        self.dilations = dilations
+        self.name = name
+        self.out_channel = out_channel
+        self.data_format, self.padding = preprocess_3d_format(data_format, padding)
+
+    def __call__(self, input, filters):
+        if self.data_format == 'NDHWC':
+            d_axis, h_axis, w_axis = 1, 2, 3
+        else:
+            d_axis, h_axis, w_axis = 2, 3, 4
+
+        input_shape = input.shape.as_list()
+        filters_shape = filters.shape.as_list()
+        batch_size = input_shape[0]
+        input_d, input_h, input_w = input_shape[d_axis], input_shape[h_axis], input_shape[w_axis]
+        kernel_d, kernel_h, kernel_w = filters_shape[0], filters_shape[1], filters_shape[2]
+        dilations_d, dilations_h, dilations_w = 1, 1, 1
+
+        if isinstance(self.strides, int):
+            # a scalar stride applies to every spatial dimension
+            strides_d = strides_h = strides_w = self.strides
+        else:
+            strides_list = list(self.strides)
+            if len(strides_list) != 5:
+                strides_d, strides_h, strides_w = \
+                    strides_list[0], \
+                    strides_list[1], \
+                    strides_list[2]
+            else:
+                strides_d, strides_h, strides_w = \
+                    strides_list[d_axis], \
+                    strides_list[h_axis], \
+                    strides_list[w_axis]
+
+        if self.dilations is not None:
+            if isinstance(self.dilations, int):
+                dilations_d = dilations_h = dilations_w = self.dilations
+            else:
+                dilations_list = list(self.dilations)
+                if len(dilations_list) != 5:
+                    dilations_d, dilations_h, dilations_w = \
+                        dilations_list[0], \
+                        dilations_list[1], \
+                        dilations_list[2]
+                else:
+                    dilations_d, dilations_h, dilations_w = \
+                        dilations_list[d_axis], \
+                        dilations_list[h_axis], \
+                        dilations_list[w_axis]
+
+        assert self.padding in {'VALID', 'SAME'}
+
+        kernel_d = kernel_d + (kernel_d - 1) * (dilations_d - 1)
+        kernel_h = kernel_h + (kernel_h - 1) * (dilations_h - 1)
+        kernel_w = kernel_w + (kernel_w - 1) * (dilations_w - 1)
+
+        if self.padding == 'VALID':
+            output_d = input_d * strides_d + max(kernel_d - strides_d, 0)
+            output_h = input_h * strides_h + max(kernel_h - strides_h, 0)
+            output_w = input_w * strides_w + max(kernel_w - strides_w, 0)
+        elif self.padding == 'SAME':
+            output_d = input_d * strides_d
+            output_h = input_h * strides_h
+            output_w = input_w * strides_w
+
+        if self.data_format == 'NDHWC':
+            output_shape = (batch_size, output_d, output_h, output_w, self.out_channel)
+        else:
+            output_shape = (batch_size, self.out_channel, output_d, output_h, output_w)
+
+        output_shape = tf.stack(output_shape)
+        outputs = tf.nn.conv3d_transpose(
+            input=input, filters=filters, output_shape=output_shape, strides=self.strides, padding=self.padding,
+            data_format=self.data_format, dilations=self.dilations, name=self.name
+        )
+
+        return outputs
+
+
 def conv3d_transpose(
     input, filters, output_shape, strides, padding='SAME', data_format='NDHWC', dilations=None, name=None
 ):
diff --git a/tensorlayer/dataflow/__init__.py b/tensorlayer/dataflow/__init__.py
index b6fbab28e..f0c6c284d 100644
--- a/tensorlayer/dataflow/__init__.py
+++ b/tensorlayer/dataflow/__init__.py
@@ -2,4 +2,19 @@
 # -*- coding: utf-8 -*-
 
 from __future__ import absolute_import, division, print_function
-from .common import Dataset
+from tensorlayer.backend.ops.load_backend import BACKEND
+
+if BACKEND == 'tensorflow':
+    from .tensorflow_data import *
+    from .tensorflow_image import *
+
+elif BACKEND == 'mindspore':
+    from .mindspore_data import *
+    from .mindspore_image import *
+
+elif BACKEND == 'dragon':
+    pass
+
+else:
+    raise NotImplementedError("This backend is not supported")
diff --git a/tensorlayer/dataflow/base.py b/tensorlayer/dataflow/base.py
deleted file mode 100644
index 41450f8be..000000000
--- a/tensorlayer/dataflow/base.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-
-import numpy as np
-
-
-def generator(X_train, y_train=None):
-    inputs = X_train
-    targets = y_train
-    if targets is None:
-        for _input in X_train:
-            yield _input
-    else:
-        if len(inputs) != len(targets):
-            raise AssertionError("The length of inputs and targets should be equal")
-        for _input, _target in zip(inputs, targets):
-            # yield _input.encode('utf-8'), _target.encode('utf-8')
-            yield (_input, np.array([_target]))
diff --git a/tensorlayer/dataflow/common.py b/tensorlayer/dataflow/common.py
deleted file mode 100644
index aadd3dbd8..000000000
--- a/tensorlayer/dataflow/common.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-
-from .load_data_backend import *
-
-
-class Dataset(object):
-
-    def __init__(self):
-        pass
-
-    @staticmethod
-    def from_generator(generator, output_types, output_shapes=None, args=None):
-        return FromGenerator(generator, output_types, output_shapes=output_shapes, args=args)
-
-    @staticmethod
-    def map(ds, map_func, num_parallel_calls=None):
-        return Map(ds=ds, map_func=map_func, num_parallel_calls=num_parallel_calls)
-
-    @staticmethod
-    def shuffle(ds, buffer_size, seed=None, reshuffle_each_iteration=None):
-        return Shuffle(ds=ds, buffer_size=buffer_size, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration)
-
-    @staticmethod
-    def prefetch(ds, buffer_size):
-        return Prefetch(ds=ds, buffer_size=buffer_size)
-
-    @staticmethod
-    def batch(ds, batch_size, drop_remainder=False):
-        return Batch(ds=ds, batch_size=batch_size, drop_remainder=drop_remainder)
-
-    @staticmethod
-    def repeat(ds, count):
-        return Repeat(ds, count=count)
diff --git a/tensorlayer/dataflow/load_data_backend.py b/tensorlayer/dataflow/load_data_backend.py
deleted file mode 100644
index 7fe8d12e0..000000000
--- a/tensorlayer/dataflow/load_data_backend.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-
-from tensorlayer.backend.ops.load_backend import BACKEND
-
-if BACKEND == 'tensorflow':
-    pass
-if BACKEND == 'mindspore':
-    pass
diff --git a/tensorlayer/dataflow/mindspore_data.py b/tensorlayer/dataflow/mindspore_data.py
index 27e647a8a..f42a1d4f9 100644
--- a/tensorlayer/dataflow/mindspore_data.py
+++ b/tensorlayer/dataflow/mindspore_data.py
@@ -1,21 +1,141 @@
 #! /usr/bin/python
 # -*- coding: utf-8 -*-
-import mindspore.dataset as dataset
+import mindspore.dataset as ds
+import mindspore as ms
+from enum import Enum
 
-__all__ = ['FromGenerator', 'Map', 'Shuffle', 'Prefetch', 'Batch', 'Repeat']
+__all__ = [
+    'Apply',
+    'Batch',
+    'Concat',
+    'CsvDataset',
+    'Filter',
+    'Flat_map',
+    'FromGenerator',
+    'FromSlices',
+    'Map',
+    'Prefetch',
+    'Repeat',
+    'Shuffle',
+    'Skip',
+    'Take',
+    'TextFlieDataset',
+    'TFRecordDataset',
+]
 
-def FromGenerator(generator, output_types, output_shapes=None, args=None):
-    pass
 
-def Map(ds, map_func, num_parallel_calls=None):
+class Shuffle(str, Enum):
+    GLOBAL: str = "global"
+    FILES: str = "file"
+
+
+def Apply(dataset, transformation_func):
+
+    return dataset.apply(transformation_func)
+
+
+def Batch(
+    dataset, batch_size, drop_remainder=False, num_parallel_workers=None, per_batch_map=None, input_columns=None,
+    output_columns=None, column_order=None, pad_info=None
+):
+    '''
+    Combine batch_size number of consecutive rows into batches.
+
+    Parameters
+    ----------
+    dataset
+    batch_size
+    drop_remainder
+    num_parallel_workers
+    per_batch_map
+    input_columns
+    output_columns
+    column_order
+    pad_info
+
+    Returns
+    -------
+
+    '''
+    return dataset.batch(
+        batch_size=batch_size, drop_remainder=drop_remainder, num_parallel_workers=num_parallel_workers,
+        per_batch_map=per_batch_map, input_columns=input_columns, output_columns=output_columns,
+        column_order=column_order, pad_info=pad_info
+    )
+
+
+def Concat(dataset_1, dataset_2):
+
+    return dataset_1.concat(dataset_2)
+
+
+def CsvDataset(
+    file_pattern, batch_size=1, column_names=None, column_defaults=None, label_name=None, select_columns=None,
+    field_delim=',', use_quote_delim=True, na_value='', header=True, num_epochs=None, shuffle=Shuffle.GLOBAL,
+    shuffle_buffer_size=10000, shuffle_seed=None, prefetch_buffer_size=None, num_parallel_reads=None, sloppy=False,
+    num_rows_for_inference=100, compression_type=None, ignore_errors=False, num_samples=None, num_shards=None,
+    shard_id=None, cache=None
+):
+    """
+    A source dataset that reads and parses comma-separated values (CSV) datasets.
+
+    Examples:
+        >>> import mindspore.dataset as dataset
+        >>>
+        >>> dataset_files = ["/path/to/1", "/path/to/2"] # contains 1 or multiple text files
+        >>> dataset = dataset.CSVDataset(dataset_files=dataset_files, column_names=['col1', 'col2', 'col3', 'col4'])
+    """
+    return ds.CSVDataset(
+        dataset_files=file_pattern, field_delim=field_delim, column_defaults=column_defaults,
+        column_names=column_names, num_samples=num_samples, num_parallel_workers=num_parallel_reads, shuffle=shuffle,
+        num_shards=num_shards, shard_id=shard_id, cache=cache
+    )
+
+
+def Filter(dataset, predicate):
+
+    return dataset.filter(predicate)
+
+
+def Flat_map(dataset, map_func):
+
+    return dataset.flat_map(map_func)
+
+
+def FromGenerator(
+    generator, output_types, output_shapes=None, args=None, column_names=None, column_types=None, schema=None,
+    num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None, shard_id=None,
+    python_multiprocessing=True
+):
+
+    return ds.GeneratorDataset(
+        source=generator, column_names=column_names, column_types=column_types, schema=schema, num_samples=num_samples,
+        num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler, num_shards=num_shards,
+        shard_id=shard_id, python_multiprocessing=python_multiprocessing
+    )
+
+
+def FromSlices(
+    tensor, column_names=None, num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None,
+    shard_id=None
+):
+
+    return ds.NumpySlicesDataset(
+        data=tensor, column_names=column_names, num_samples=num_samples, num_parallel_workers=num_parallel_workers,
+        shuffle=shuffle, sampler=sampler, num_shards=num_shards, shard_id=shard_id
+    )
+
+
+def Map(
+    dataset, map_func, num_parallel_calls=None, input_columns=None, output_columns=None, column_order=None,
+    num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None
+):
     """ Maps map_func across the elements of this dataset.
 
     Parameters
     ----------
-    ds : DataFlow
+    dataset : DataFlow
         input DataFlow
     map_func : function
         A function mapping a dataset element to another dataset element.
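The functional wrappers above give the tf.data-style pipeline a backend-neutral surface; the MNIST tutorial earlier in this patch already chains them. A condensed sketch of that pattern (the generator body and the buffer/batch sizes are placeholders):

    import numpy as np
    import tensorlayer as tl

    def generator_train():
        for _ in range(8):  # toy stand-in for the MNIST generator
            yield np.zeros((784, ), dtype=np.float32), np.array(0, dtype=np.int32)

    train_ds = tl.dataflow.FromGenerator(
        generator_train, output_types=(tl.float32, tl.int32), column_names=['data', 'label']
    )
    train_ds = tl.dataflow.Shuffle(train_ds, 128)
    train_ds = tl.dataflow.Batch(train_ds, 32)
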
@@ -25,20 +146,135 @@ def Map(ds, map_func, num_parallel_calls=None): ------- """ - pass + return dataset.map( + operations=map_func, input_columns=input_columns, output_columns=output_columns, column_order=column_order, + num_parallel_workers=num_parallel_workers, python_multiprocessing=python_multiprocessing, cache=cache, + callbacks=callbacks + ) + + +def Prefetch(dataset, buffer_size): + + batch_size = dataset.get_batch_size() + prefetch_size = batch_size * buffer_size + return dataset.config.set_prefetch_size(prefetch_size) -def Shuffle(ds, buffer_size, seed=None, reshuffle_each_iteration=None): - pass -def Prefetch(ds, buffer_size): - pass +def Repeat(dataset, count=None): -def Batch(ds, batch_size, drop_remainder=False): - pass + return dataset.repeat(count) -def Repeat(ds, count=None): - pass +def Shuffle(dataset, buffer_size, seed=None, reshuffle_each_iteration=None): + + #dataset.config.set_seed(seed) + + return dataset.shuffle(buffer_size) + + +def Skip(dataset, count): + ''' + Creates a Dataset that skips count elements from this dataset. + Parameters + ---------- + dataset: + A dataset + count: + A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be skipped to form the new dataset. + + + Returns + ------- + + ''' + return dataset.skip(count) + + +def Take(dataset, count): + ''' + Creates a Dataset with at most count elements from this dataset. + Parameters + ---------- + dataset: + A dataset + count: + A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be taken to form the new dataset. + If count is -1, or if count is greater than the size of this dataset, the new dataset will contain all elements of this dataset. + Returns + ------- + + ''' + return dataset.take(count) + + +def TextFlieDataset( + filenames, compression_type=None, buffer_size=None, num_parallel_reads=None, num_samples=None, shuffle=None, + num_shards=None, shard_id=None, cache=None +): + """ + A source dataset that reads and parses datasets stored on disk in text format. + The generated dataset has one column ['text']. + + Examples: + >>> import mindspore.dataset as dataset + >>> + >>> dataset_files = ["/path/to/1", "/path/to/2"] # contains 1 or multiple text files + >>> dataset = dataset.TextFileDataset(dataset_files=dataset_files) + """ + if shuffle is None: + shuffle = Shuffle.GLOBAL + return ds.TextFileDataset( + dataset_files=filenames, num_samples=num_samples, num_parallel_workers=num_parallel_reads, shuffle=shuffle, + num_shards=num_shards, shard_id=shard_id, cache=cache + ) + + +def TFRecordDataset( + filenames, compression_type=None, buffer_size=None, num_parallel_reads=None, schema=None, columns_list=None, + num_samples=None, shuffle=None, num_shards=None, shard_id=None, shard_equal_rows=False, cache=None +): + """ + A source dataset that reads and parses datasets stored on disk in TFData format. + + Examples: + >>> import mindspore.dataset as dataset + >>> import mindspore.common.dtype as mstype + >>> + >>> dataset_files = ["/path/to/1", "/path/to/2"] # contains 1 or multiple tf data files + >>> + >>> # 1) Get all rows from dataset_files with no explicit schema + >>> # The meta-data in the first row will be used as a schema. 
+ >>> tfdataset = dataset.TFRecordDataset(dataset_files=dataset_files) + >>> + >>> # 2) Get all rows from dataset_files with user-defined schema + >>> schema = dataset.Schema() + >>> schema.add_column('col_1d', de_type=mindspore.int64, shape=[2]) + >>> tfdataset = dataset.TFRecordDataset(dataset_files=dataset_files, schema=schema) + >>> + >>> # 3) Get all rows from dataset_files with schema file "./schema.json" + >>> tfdataset = dataset.TFRecordDataset(dataset_files=dataset_files, schema="./schema.json") + """ + if shuffle is None: + shuffle = Shuffle.GLOBAL + return ds.TFRecordDataset( + dataset_files=filenames, schema=schema, columns_list=columns_list, num_samples=num_samples, + num_parallel_workers=num_parallel_reads, shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, + shard_equal_rows=shard_equal_rows, cache=cache + ) + + +def Zip(datasets): + ''' + Creates a Dataset by zipping together the given datasets. + Parameters + ---------- + datasets: + A tuple of datasets to be zipped together. + Returns + ------- + + ''' + return ds.zip(datasets) diff --git a/tensorlayer/dataflow/mindspore_image.py b/tensorlayer/dataflow/mindspore_image.py new file mode 100644 index 000000000..e4c1fd9ab --- /dev/null +++ b/tensorlayer/dataflow/mindspore_image.py @@ -0,0 +1,305 @@ +import mindspore.dataset as ms +import mindspore.dataset.vision.c_transforms as c_vision +import mindspore.dataset.vision.py_transforms as py_vision +import mindspore.dataset.vision.py_transforms_util as py_util +import numpy as np +from PIL import Image, ImageOps, ImageEnhance, __version__ + +__all__ = [ + 'CentralCrop', 'HsvToRgb', 'AdjustBrightness', 'AdjustContrast', 'AdjustHue', 'Crop', 'FlipHorizontal', + 'FlipVertical', 'GrayToRgb', 'RgbToGray', 'PadToBoundingBox' +] + +augment_error_message = 'img should be PIL image. Got {}. Use Decode() for encoded data or ToPIL() for decoded data.' + + +def CentralCrop(image, central_fraction=None, size=None): + ''' + + Parameters + ---------- + image : + input Either a 3-D float Tensor of shape [height, width, depth], + or a 4-D Tensor of shape [batch_size, height, width, depth]. + central_fraction : + float (0, 1], fraction of size to crop + size: + size (Union[int, sequence]) – The output size of the cropped image. If size is an integer, a square crop of size (size, size) is returned. + If size is a sequence of length 2, it should be (height, width). + Returns : + 3-D / 4-D float Tensor, as per the input. 
+    -------
+    '''
+    if size is None and central_fraction is None:
+        raise ValueError('central_fraction and size can not be both None')
+
+    if size is None:
+        outshape = np.shape(image)
+        if len(outshape) == 3:
+            h_axis = 0
+            w_axis = 1
+        elif len(outshape) == 4:
+            h_axis = 1
+            w_axis = 2
+
+        height = outshape[h_axis]
+        width = outshape[w_axis]
+
+        target_height = height * central_fraction
+        target_width = width * central_fraction
+
+        size = (int(target_height), int(target_width))
+
+    return py_util.center_crop(image, size)
+
+
+def HsvToRgb(image, is_hwc=True):
+
+    image = np.asarray(image)
+
+    return py_util.hsv_to_rgbs(image, is_hwc=is_hwc)
+
+
+def AdjustBrightness(image, factor):
+    '''
+
+    Parameters
+    ----------
+    image:
+        input NumPy image array or PIL image
+    factor:
+        brightness offset, should be in the range (-1, 1)
+    Returns:
+        A NumPy ndarray image
+    -------
+    '''
+
+    image = np.asarray(image)
+    image = image / 255
+    image = image + factor
+    index = np.where(image > 1)
+    image[index] = 1
+    index = np.where(image < 0)
+    image[index] = 0
+    image = image * 255
+
+    return image
+
+
+def AdjustContrast(image, factor):
+
+    if isinstance(image, np.ndarray):
+        image = Image.fromarray(image)
+    if not isinstance(image, Image.Image):
+        raise TypeError(augment_error_message.format(type(image)))
+
+    image = ImageEnhance.Contrast(image).enhance(factor)
+
+    image = np.array(image)
+
+    return image
+
+
+def AdjustHue(image, factor):
+
+    if isinstance(image, np.ndarray):
+        image = Image.fromarray(image)
+    if not isinstance(image, Image.Image):
+        raise TypeError(augment_error_message.format(type(image)))
+
+    image_hue_factor = factor
+    if not -1 <= image_hue_factor <= 1:
+        raise ValueError('image_hue_factor {} is not in [-1, 1].'.format(image_hue_factor))
+
+    mode = image.mode
+    if mode in {'L', '1', 'I', 'F'}:
+        return image
+
+    hue, saturation, value = image.convert('HSV').split()
+
+    np_hue = np.array(hue, dtype=np.uint8)
+
+    with np.errstate(over='ignore'):
+        np_hue += np.uint8(image_hue_factor * 255)
+    hue = Image.fromarray(np_hue, 'L')
+
+    image = Image.merge('HSV', (hue, saturation, value)).convert(mode)
+    return image
+
+
+def AdjustSaturation(image, factor):
+
+    if isinstance(image, np.ndarray):
+        image = Image.fromarray(image)
+    if not isinstance(image, Image.Image):
+        raise TypeError(augment_error_message.format(type(image)))
+
+    enhancer = ImageEnhance.Color(image)
+    image = enhancer.enhance(factor)
+    return image
+
+
+def Crop(image, offset_height, offset_width, target_height, target_width):
+
+    if isinstance(image, np.ndarray):
+        image = Image.fromarray(image)
+    if not isinstance(image, Image.Image):
+        raise TypeError(augment_error_message.format(type(image)))
+    # PIL's crop box is (left, upper, right, lower)
+    image = np.array(
+        image.crop((offset_width, offset_height, offset_width + target_width, offset_height + target_height))
+    )
+    return image
+
+
+def FlipHorizontal(image):
+
+    if isinstance(image, np.ndarray):
+        image = Image.fromarray(image)
+    if not isinstance(image, Image.Image):
+        raise TypeError(augment_error_message.format(type(image)))
+
+    image = np.array(image.transpose(Image.FLIP_LEFT_RIGHT))
+
+    return image
+
+
+def FlipVertical(image):
+
+    if isinstance(image, np.ndarray):
+        image = Image.fromarray(image)
+    if not isinstance(image, Image.Image):
+        raise TypeError(augment_error_message.format(type(image)))
+
+    image = np.array(image.transpose(Image.FLIP_TOP_BOTTOM))
+
+    return image
+
+
+def GrayToRgb(image):
+
+    image = np.asarray(image)
+    shape = image.shape
+    output_image = np.zeros((shape[0], shape[1], 3), dtype=np.uint8)
+    if len(shape) == 3:
+        for i in range(3):
+            output_image[:, :, i] = image[:, :, 0]
+    elif len(shape) == 2:
+        for i in range(3):
+            output_image[:, :, i] = image
+
+    return output_image
+
+
+def RgbToGray(image):
+
+    if isinstance(image, np.ndarray):
+        image = Image.fromarray(image)
+    if not isinstance(image, Image.Image):
+        raise TypeError(augment_error_message.format(type(image)))
+    # When converting a color image to grayscale (mode 'L'),
+    # PIL uses the ITU-R 601-2 luma transform:
+    # L = R * 299/1000 + G * 587/1000 + B * 114/1000
+    image = image.convert('L')
+    image = np.asarray(image)
+
+    return image
+
+
+def PadToBoundingBox(image, offset_height, offset_width, target_height, target_width):
+    '''
+
+    Parameters
+    ----------
+    image:
+        A PIL image
+    offset_height:
+        Number of rows of zeros to add on top.
+    offset_width:
+        Number of columns of zeros to add on the left.
+    target_height:
+        Height of output image.
+    target_width:
+        Width of output image.
+    Returns
+        A numpy ndarray image
+    -------
+    '''
+
+    if offset_height < 0:
+        raise ValueError("offset_height must be >= 0")
+    if offset_width < 0:
+        raise ValueError("offset_width must be >= 0")
+    image = np.array(image)
+    shape = image.shape
+    top = offset_height
+    bottom = target_height - shape[0] - top
+    left = offset_width
+    right = target_width - shape[1] - left
+
+    if bottom < 0:
+        raise ValueError("target_height must be >= offset_height + height")
+
+    if right < 0:
+        raise ValueError("target_width must be >= offset_width + width")
+
+    return np.pad(image, ((top, bottom), (left, right), (0, 0)), mode='constant')
+
+
+def Standardization(image, mean=None, std=None, channel_mode=False):
+    '''
+
+    Parameters
+    ----------
+    image:
+        An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image.
+    mean:
+        List or tuple of mean values for each channel, with respect to channel order.
+    std:
+        List or tuple of standard deviations for each channel.
+    channel_mode:
+        Decide to implement standardization on whole image or each channel of image.
+    Returns:
+        A Tensor with the same shape and dtype as image.
+    -------
+    '''
+    image = np.array(image, dtype=np.float32)
+    num_shape = image.shape
+    if mean is not None and std is not None:
+        if len(mean) != len(std):
+            raise ValueError("Length of mean and std must be equal")
+        if len(mean) == 1:
+            mean = [mean[0]] * num_shape[2]
+            std = [std[0]] * num_shape[2]
+        mean = np.array(mean, dtype=image.dtype)
+        std = np.array(std, dtype=image.dtype)
+        # images are HWC here, so the per-channel statistics broadcast
+        # over the trailing channel axis
+        return (image - mean) / std
+    elif mean is None and std is None:
+        if channel_mode:
+            num_pixels = num_shape[0] * num_shape[1]
+            image_mean = np.mean(image, axis=(0, 1))
+            stddev = np.std(image, axis=(0, 1))
+            min_stddev = 1 / np.sqrt(num_pixels)
+            adjusted_stddev = np.maximum(stddev, min_stddev)
+
+            image -= image_mean
+            image = np.divide(image, adjusted_stddev)
+            return image
+        else:
+            num_pixels = num_shape[0] * num_shape[1] * num_shape[2]
+            image_mean = np.mean(image)
+            stddev = np.std(image)
+            min_stddev = 1 / np.sqrt(num_pixels)
+            adjusted_stddev = np.maximum(stddev, min_stddev)
+
+            image -= image_mean
+            image = np.divide(image, adjusted_stddev)
+            return image
+    else:
+        raise ValueError('std and mean must both be None or not None')
diff --git a/tensorlayer/dataflow/tensorflow_data.py b/tensorlayer/dataflow/tensorflow_data.py
index 39d887fd1..ce50c77d0 100644
--- a/tensorlayer/dataflow/tensorflow_data.py
+++ b/tensorlayer/dataflow/tensorflow_data.py
@@ -3,19 +3,154 @@
 
 import tensorflow as tf
 
-__all__ = ['FromGenerator', 'Map', 'Shuffle', 'Prefetch', 'Batch', 'Repeat']
+__all__ = [
+    'Apply',
+    'Batch',
+    'Concat',
+    'CsvDataset',
+    'Filter',
+    'Flat_map',
+    'FromGenerator',
+    'FromSlices',
+    'Map',
+    'Prefetch',
+    'Repeat',
+    'Shuffle',
+    'Skip',
+    'Take',
+    'TextFlieDataset',
+    'TFRecordDataset',
+    'Zip',
+]
+
+
+def Apply(dataset, transformation_func):
+    """Applies a transformation function to this dataset.
+    `apply` enables chaining of custom `Dataset` transformations, which are
+    represented as functions that take one `Dataset` argument and return a
+    transformed `Dataset`.
+    >>> dataset = tf.data.Dataset.range(100)
+    >>> def dataset_fn(dataset):
+    ...     return dataset.filter(lambda x: x < 5)
+    >>> dataset = dataset.apply(dataset_fn)
+    >>> list(dataset.as_numpy_iterator())
+    [0, 1, 2, 3, 4]
+    Args:
+      transformation_func: A function that takes one `Dataset` argument and
+        returns a `Dataset`.
+    Returns:
+      Dataset: The `Dataset` returned by applying `transformation_func` to this
+      dataset.
+    """
+    return dataset.apply(transformation_func)
+
+
+def Batch(dataset, batch_size, drop_remainder=False):
+    '''
+    Parameters
+    ----------
+    dataset
+    batch_size
+    drop_remainder
+
+    Returns
+    -------
+
+    '''
+    return dataset.batch(batch_size=batch_size, drop_remainder=drop_remainder)
+
+
+def Concat(dataset_1, dataset_2):
 
-def FromGenerator(generator, output_types, output_shapes=None, args=None):
+    return dataset_1.concatenate(dataset_2)
+
+
+def CsvDataset(
+    file_pattern, batch_size=1, column_names=None, column_defaults=None, label_name=None, select_columns=None,
+    field_delim=',', use_quote_delim=True, na_value='', header=True, num_epochs=None, shuffle=True,
+    shuffle_buffer_size=10000, shuffle_seed=None, prefetch_buffer_size=None, num_parallel_reads=None, sloppy=False,
+    num_rows_for_inference=100, compression_type=None, ignore_errors=False, num_samples=None, num_shards=None,
+    shard_id=None, cache=None
+):
+    """Reads CSV files into a dataset.
+    Reads CSV files into a dataset, where each element is a (features, labels)
+    tuple that corresponds to a batch of CSV rows. The features dictionary
+    maps feature column names to `Tensor`s containing the corresponding
+    feature data, and labels is a `Tensor` containing the batch's label data.
+    """
+    return tf.data.experimental.make_csv_dataset(
+        file_pattern, batch_size, column_names=column_names, column_defaults=column_defaults, label_name=label_name,
+        select_columns=select_columns, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value,
+        header=header, num_epochs=num_epochs, shuffle=shuffle, shuffle_buffer_size=shuffle_buffer_size,
+        shuffle_seed=shuffle_seed, prefetch_buffer_size=prefetch_buffer_size, num_parallel_reads=num_parallel_reads,
+        sloppy=sloppy, num_rows_for_inference=num_rows_for_inference, compression_type=compression_type,
+        ignore_errors=ignore_errors
+    )
+
+
+def Filter(dataset, predicate):
+    '''
+    Filters this dataset according to predicate.
+    Parameters
+    ----------
+    dataset :
+        A dataset
+    predicate :
+        A function mapping a dataset element to a boolean.
+    Returns :
+        The Dataset containing the elements of this dataset for which predicate is True.
+    -------
+
+    '''
+    return dataset.filter(predicate)
+
+
+def Flat_map(dataset, map_func):
+    '''
+    Maps map_func across this dataset and flattens the result.
+    Parameters
+    ----------
+    dataset:
+        A dataset
+    map_func
+        A function mapping a dataset element to a dataset.
+    Returns
+        A Dataset.
+    -------
+
+    '''
+    return dataset.flat_map(map_func)
+
+
+def FromGenerator(
+    generator, output_types, output_shapes=None, args=None, column_names=None, column_types=None, schema=None,
+    num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None, shard_id=None,
+    python_multiprocessing=True
+):
+    """Creates a `Dataset` whose elements are generated by `generator`.
+
+    generator:
+        A callable object
+    """
     return tf.data.Dataset.from_generator(generator, output_types, output_shapes=output_shapes, args=args)
 
-def Map(ds, map_func, num_parallel_calls=None):
+
+def FromSlices(
+    tensor, column_names=None, num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None,
+    shard_id=None
+):
+
+    return tf.data.Dataset.from_tensor_slices(tensor)
+
+
+def Map(
+    dataset, map_func, num_parallel_calls=None, input_columns=None, output_columns=None, column_order=None,
+    num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None
+):
     """ Maps map_func across the elements of this dataset.
 
     Parameters
     ----------
-    ds : DataFlow
+    dataset : DataFlow
         input DataFlow
     map_func : function
         A function mapping a dataset element to another dataset element.
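These wrappers stay thin over tf.data, so they compose exactly like the underlying Dataset methods. A short sketch of chaining them, assuming TL_BACKEND='tensorflow' (the array contents are placeholders):

    import numpy as np
    import tensorlayer as tl

    data = np.arange(10, dtype=np.float32)
    ds = tl.dataflow.FromSlices(data)
    ds = tl.dataflow.Map(ds, lambda x: x * 2.0)
    ds = tl.dataflow.Skip(ds, 2)
    ds = tl.dataflow.Take(ds, 4)
    ds = tl.dataflow.Batch(ds, 2)
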
@@ -25,20 +160,95 @@ def Map(ds, map_func, num_parallel_calls=None): ------- """ - return ds.map(map_func, num_parallel_calls=num_parallel_calls) + return dataset.map(map_func, num_parallel_calls=num_parallel_calls) -def Shuffle(ds, buffer_size, seed=None, reshuffle_each_iteration=None): - return ds.shuffle(buffer_size, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration) +def Prefetch(dataset, buffer_size): + ''' + Creates a Dataset that prefetches elements from this dataset. + Parameters + ---------- + dataset: Dataflow + A dataset + buffer_size : + A tf.int64 scalar tf.Tensor, representing the maximum number of elements that will be buffered when prefetching. + Returns + A Dataset + ------- + + ''' + return dataset.prefetch(buffer_size=buffer_size) + + +def Repeat(dataset, count=None): + return dataset.repeat(count=count) + +def Shuffle(dataset, buffer_size, seed=None, reshuffle_each_iteration=None): + return dataset.shuffle(buffer_size, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration) -def Prefetch(ds, buffer_size): - return ds.prefetch(buffer_size=buffer_size) +def Skip(dataset, count): + ''' + Creates a Dataset that skips count elements from this dataset. + Parameters + ---------- + dataset: + A dataset + count: + A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be skipped to form the new dataset. + If count is greater than the size of this dataset, the new dataset will contain no elements. + If count is -1, skips the entire dataset. + + Returns + ------- + + ''' + return dataset.skip(count) -def Batch(ds, batch_size, drop_remainder=False): - return ds.batch(batch_size=batch_size, drop_remainder=drop_remainder) +def Take(dataset, count): + ''' + Creates a Dataset with at most count elements from this dataset. + Parameters + ---------- + dataset: + A dataset + count: + A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be taken to form the new dataset. + If count is -1, or if count is greater than the size of this dataset, the new dataset will contain all elements of this dataset. + Returns + ------- + + ''' + return dataset.take(count) + + +def TextFlieDataset( + filenames, compression_type=None, buffer_size=None, num_parallel_reads=None, num_samples=None, shuffle=None, + num_shards=None, shard_id=None, cache=None +): + + return tf.data.TextLineDataset(filenames, compression_type, buffer_size, num_parallel_reads) + + +def TFRecordDataset( + filenames, compression_type=None, buffer_size=None, num_parallel_reads=None, schema=None, columns_list=None, + num_samples=None, shuffle=None, num_shards=None, shard_id=None, shard_equal_rows=False, cache=None +): + + return tf.data.TFRecordDataset(filenames, compression_type, buffer_size, num_parallel_reads) + + +def Zip(datasets): + ''' + Creates a Dataset by zipping together the given datasets. + Parameters + ---------- + datasets: + A tuple of datasets to be zipped together. 
+    Returns
+    -------
+
+    '''
+    return tf.data.Dataset.zip(datasets)
diff --git a/tensorlayer/dataflow/tensorflow_image.py b/tensorlayer/dataflow/tensorflow_image.py
new file mode 100644
index 000000000..39419b970
--- /dev/null
+++ b/tensorlayer/dataflow/tensorflow_image.py
@@ -0,0 +1,200 @@
+import tensorflow as tf
+import numpy as np
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.framework import ops
+from tensorflow.python.ops.image_ops_impl import _AssertAtLeast3DImage
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops.image_ops_impl import convert_image_dtype
+__all__ = [
+    'CentralCrop',
+    'HsvToRgb',
+    'AdjustBrightness',
+    'AdjustContrast',
+    'AdjustHue',
+    'AdjustSaturation',
+    'Crop',
+    'FlipHorizontal',
+    'FlipVertical',
+    'GrayToRgb',
+    'Standardization',
+]
+
+
+def CentralCrop(image, central_fraction=None, size=None):
+    '''
+
+    Parameters
+    ----------
+    image :
+        input Either a 3-D float Tensor of shape [height, width, depth],
+        or a 4-D Tensor of shape [batch_size, height, width, depth].
+    central_fraction :
+        float (0, 1], fraction of size to crop
+    size:
+        size (Union[int, sequence]) – The output size of the cropped image. If size is an integer, a square crop of size (size, size) is returned.
+        If size is a sequence of length 2, it should be (height, width).
+    Returns :
+        3-D / 4-D float Tensor, as per the input.
+    -------
+    '''
+    if size is None and central_fraction is None:
+        raise ValueError('central_fraction and size can not be both None')
+
+    if central_fraction is None:
+        outshape = np.shape(image)
+        if len(outshape) == 3:
+            h_axis = 0
+            w_axis = 1
+        elif len(outshape) == 4:
+            h_axis = 1
+            w_axis = 2
+
+        if isinstance(size, int):
+            target_height = size
+            target_width = size
+        elif isinstance(size, tuple):
+            target_height = size[0]
+            target_width = size[1]
+
+        # use float division: tf.image.central_crop expects a fraction in
+        # (0, 1], and integer division would round it down to 0
+        central_fraction = max(target_height / outshape[h_axis], target_width / outshape[w_axis])
+
+    return tf.image.central_crop(image, central_fraction)
+
+
+def HsvToRgb(image):
+
+    return tf.image.hsv_to_rgb(image)
+
+
+def AdjustBrightness(image, factor):
+
+    return tf.image.adjust_brightness(image, delta=factor)
+
+
+def AdjustContrast(image, factor):
+
+    return tf.image.adjust_contrast(image, contrast_factor=factor)
+
+
+def AdjustHue(image, factor):
+
+    return tf.image.adjust_hue(image, delta=factor)
+
+
+def AdjustSaturation(image, factor):
+
+    return tf.image.adjust_saturation(image, saturation_factor=factor)
+
+
+def Crop(image, offset_height, offset_width, target_height, target_width):
+    '''
+
+    Parameters
+    ----------
+    image:
+        An image or a batch of images
+    offset_height:
+        Vertical coordinate of the top-left corner of the result in the input.
+    offset_width:
+        Horizontal coordinate of the top-left corner of the result in the input.
+    target_height:
+        Height of the result.
+    target_width:
+        Width of the result.
+ + Returns: + Output [batch, target_height, target_width, channels] or [target_height, target_width, channels] + ------- + ''' + + return tf.image.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width) + + +def FlipHorizontal(image): + + return tf.image.flip_left_right(image) + + +def FlipVertical(image): + + return tf.image.flip_up_down(image) + + +def GrayToRgb(image): + + return tf.image.grayscale_to_rgb(image) + + +def RgbToGray(image): + + return tf.image.rgb_to_grayscale(image) + + +def PadToBoundingBox(image, offset_height, offset_width, target_height, target_width): + + return tf.image.pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width) + + +def Standardization(image, mean=None, std=None, channel_mode=False): + ''' + + Parameters + ---------- + image: + An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image. + mean: + List or tuple of mean values for each channel, with respect to channel order. + std: + List or tuple of standard deviations for each channel. + channel_mode: + Decide to implement standardization on whole image or each channel of image. + Returns: + A Tensor with the same shape and dtype as image. + ------- + ''' + with ops.name_scope(None, 'Standardization', [image]) as scope: + image = ops.convert_to_tensor(image, name='image') + image = _AssertAtLeast3DImage(image) + + orig_dtype = image.dtype + if orig_dtype not in [dtypes.float16, dtypes.float32]: + image = convert_image_dtype(image, dtypes.float32) + + if mean is not None and std is not None: + mean = np.array(mean, dtype=np.float32) + std = np.array(std, dtype=np.float32) + image -= mean + image = math_ops.divide(image, std, name=scope) + return convert_image_dtype(image, orig_dtype, saturate=True) + + elif mean is None and std is None: + if channel_mode: + num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:-1]) + #`num_pixels` is the number of elements in each channels of 'image' + image_mean = math_ops.reduce_mean(image, axis=[-2, -3], keepdims=True) + # `image_mean` is the mean of elements in each channels of 'image' + + stddev = math_ops.reduce_std(image, axis=[-2, -3], keepdims=True) + min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, image.dtype)) + adjusted_sttdev = math_ops.maximum(stddev, min_stddev) + + image -= image_mean + image = math_ops.divide(image, adjusted_sttdev, name=scope) + return convert_image_dtype(image, orig_dtype, saturate=True) + + else: + num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:]) + #`num_pixels` is the number of elements in `image` + image_mean = math_ops.reduce_mean(image, axis=[-1, -2, -3], keepdims=True) + + # Apply a minimum normalization that protects us against uniform images. 
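+                # the 1/sqrt(N) floor below caps how small the divisor can
+                # get, so a constant image maps to zeros instead of dividing
+                # by (near) zero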
+ stddev = math_ops.reduce_std(image, axis=[-1, -2, -3], keepdims=True) + min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, image.dtype)) + adjusted_stddev = math_ops.maximum(stddev, min_stddev) + + image -= image_mean + image = math_ops.divide(image, adjusted_stddev, name=scope) + return convert_image_dtype(image, orig_dtype, saturate=True) + else: + raise ValueError('std and mean must both be None or not None') diff --git a/tensorlayer/layers/activation.py b/tensorlayer/layers/activation.py index 1a64b07a4..34788c672 100644 --- a/tensorlayer/layers/activation.py +++ b/tensorlayer/layers/activation.py @@ -302,4 +302,5 @@ def forward(self, inputs): pos = self.relu(inputs) pos_6 = -self.relu(inputs - 6) + alpha_high_constrained * self.relu(inputs - 6) neg = -alpha_low_constrained * self.relu(-inputs) + return pos + pos_6 + neg diff --git a/tensorlayer/layers/convolution/__init__.py b/tensorlayer/layers/convolution/__init__.py index d4763a0ea..ac7ff2f4d 100644 --- a/tensorlayer/layers/convolution/__init__.py +++ b/tensorlayer/layers/convolution/__init__.py @@ -32,8 +32,9 @@ 'Conv3d', # simplified deconv - # 'DeConv2d', - # 'DeConv3d', + 'DeConv1d', + 'DeConv2d', + 'DeConv3d', # expert conv # 'Conv1dLayer', diff --git a/tensorlayer/layers/convolution/simplified_conv.py b/tensorlayer/layers/convolution/simplified_conv.py index 677f00f71..ecee3ee6e 100644 --- a/tensorlayer/layers/convolution/simplified_conv.py +++ b/tensorlayer/layers/convolution/simplified_conv.py @@ -5,7 +5,15 @@ import tensorlayer as tl from tensorlayer import logging -__all__ = ['Conv1d', 'Conv2d', 'Conv3d'] + +__all__ = [ + 'Conv1d', + 'Conv2d', + 'Conv3d', + 'DeConv1d', + 'DeConv2d', + 'DeConv3d', +] class Conv1d(Module): @@ -119,7 +127,7 @@ def build(self, inputs_shape): self.b_init_flag = False if self.b_init: - self.b = self._get_weights("biases", shape=(self.n_filter), init=self.b_init) + self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) self.bias_add = tl.ops.BiasAdd(self.data_format) self.b_init_flag = True @@ -139,6 +147,9 @@ def forward(self, inputs): outputs = tl.ops.bias_add(outputs, self.b, data_format=self.data_format) if self.act_init_flag: outputs = self.activate(outputs) + if tl.BACKEND == 'mindspore' and self.data_format == 'NWC': + outputs = tl.nchw_to_nhwc(outputs) + return outputs @@ -422,3 +433,437 @@ def forward(self, inputs): if self.act_init_flag: outputs = self.activate(outputs) return outputs + + +class DeConv1d(Module): + """Simplified version of :class:`Deconv1dlayer`. + + Parameters + ---------- + n_filter : int + The number of filters + filter_size : int + The filter size + strides : int or list + An int or list of `ints` that has length `1` or `3`. The number of entries by which the filter is moved right at each step. + output_shape : a 1-D Tensor + containing three elements, representing the output shape of the deconvolution op. + dilation_rate : int or list + Specifying the dilation rate to use for dilated convolution. + act : activation function + The function that is applied to the layer activations + padding : str + The padding algorithm type: "SAME" or "VALID". + data_format : str + "channel_last" (NWC, default) or "channels_first" (NCW). + W_init : initializer + The initializer for the weight matrix. + b_init : initializer or None + The initializer for the bias vector. If None, skip biases. + in_channels : int + The number of in channels. 
+    name : None or str
+        A unique layer name.
+
+    Examples
+    --------
+    With TensorLayer
+
+    >>> net = tl.layers.Input([8, 100, 1], name='input')
+    >>> conv1d = tl.layers.DeConv1d(n_filter=32, filter_size=5, strides=2, b_init=None, in_channels=1, name='Deconv1d_1')
+    >>> print(conv1d)
+    >>> tensor = tl.layers.DeConv1d(n_filter=32, filter_size=5, strides=2, act=tl.ops.relu, name='Deconv1d_2')(net)
+    >>> print(tensor)
+
+    """
+
+    def __init__(
+        self,
+        n_filter=32,
+        filter_size=15,
+        strides=1,
+        act=None,
+        padding='SAME',
+        data_format="channels_last",
+        dilation_rate=1,
+        W_init=tl.initializers.truncated_normal(stddev=0.02),
+        b_init=tl.initializers.constant(value=0.0),
+        in_channels=None,
+        name=None  # 'conv1d_transpose'
+    ):
+        super(DeConv1d, self).__init__(name, act=act)
+        self.n_filter = n_filter
+        self.filter_size = filter_size
+        self.strides = strides
+        self.padding = padding
+        self.data_format = data_format
+        self.dilation_rate = dilation_rate
+        self.W_init = W_init
+        self.b_init = b_init
+        self.in_channels = in_channels
+
+        if self.in_channels:
+            self.build(None)
+            self._built = True
+
+        logging.info(
+            "DeConv1d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % (
+                self.name, n_filter, filter_size, str(strides), padding,
+                self.act.__class__.__name__ if self.act is not None else 'No Activation'
+            )
+        )
+
+    def __repr__(self):
+        actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
+        s = (
+            '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}'
+            ', strides={strides}, padding={padding}'
+        )
+        if self.dilation_rate != 1:
+            s += ', dilation={dilation_rate}'
+        if self.b_init is None:
+            s += ', bias=False'
+        s += (', ' + actstr)
+        if self.name is not None:
+            s += ', name=\'{name}\''
+        s += ')'
+        return s.format(classname=self.__class__.__name__, **self.__dict__)
+
+    def build(self, inputs_shape):
+        if self.data_format == 'channels_last':
+            self.data_format = 'NWC'
+            if self.in_channels is None:
+                self.in_channels = inputs_shape[-1]
+        elif self.data_format == 'channels_first':
+            self.data_format = 'NCW'
+            if self.in_channels is None:
+                self.in_channels = inputs_shape[1]
+        else:
+            raise Exception("data_format should be either channels_last or channels_first")
+
+        self.filter_shape = (self.filter_size, self.n_filter, self.in_channels)
+
+        # TODO : check
+        self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init)
+
+        self.b_init_flag = False
+        if self.b_init:
+            self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init)
+            self.bias_add = tl.ops.BiasAdd(self.data_format)
+            self.b_init_flag = True
+
+        self.conv1d_transpose = tl.ops.Conv1d_transpose(
+            strides=self.strides,
+            padding=self.padding,
+            data_format=self.data_format,
+            dilations=self.dilation_rate,
+            out_channel=self.n_filter,
+            k_size=self.filter_size,
+            in_channels=self.in_channels,
+        )
+
+        self.act_init_flag = False
+        if self.act:
+            self.activate = self.act
+            self.act_init_flag = True
+
+    def forward(self, inputs):
+        outputs = self.conv1d_transpose(inputs, self.W)
+        if self.b_init_flag:
+            outputs = self.bias_add(outputs, self.b)
+        if self.act_init_flag:
+            outputs = self.activate(outputs)
+        if tl.BACKEND == 'mindspore' and self.data_format == 'NWC':
+            outputs = tl.nchw_to_nhwc(outputs)
+        return outputs
+
+
+class DeConv2d(Module):
+    """Simplified version of :class:`Deconv2dLayer`.
+
+    Parameters
+    ----------
+    n_filter : int
+        The number of filters.
+    filter_size : tuple of int
+        The filter size.
+    strides : tuple of int
+        The sliding window strides of corresponding input dimensions.
+    dilation_rate : tuple of int
+        Specifying the dilation rate to use for dilated convolution.
+    act : activation function
+        The activation function of this layer.
+    padding : str
+        The padding algorithm type: "SAME" or "VALID".
+    data_format : str
+        "channels_last" (NHWC, default) or "channels_first" (NCHW).
+    W_init : initializer
+        The initializer for the weight matrix.
+    b_init : initializer or None
+        The initializer for the bias vector. If None, skip biases.
+    in_channels : int
+        The number of in channels.
+    name : None or str
+        A unique layer name.
+
+    Examples
+    --------
+    With TensorLayer
+
+    >>> net = tl.layers.Input([8, 400, 400, 3], name='input')
+    >>> conv2d_transpose = tl.layers.DeConv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), b_init=None, in_channels=3, name='conv2d_transpose_1')
+    >>> print(conv2d_transpose)
+    >>> tensor = tl.layers.DeConv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tl.ops.relu, name='conv2d_transpose_2')(net)
+    >>> print(tensor)
+
+    """
+
+    def __init__(
+        self,
+        n_filter=32,
+        filter_size=(3, 3),
+        strides=(1, 1),
+        act=None,
+        padding='SAME',
+        data_format='channels_last',
+        dilation_rate=(1, 1),
+        W_init=tl.initializers.truncated_normal(stddev=0.02),
+        b_init=tl.initializers.constant(value=0.0),
+        in_channels=None,
+        name=None,  # 'conv2d_transpose',
+    ):
+        super(DeConv2d, self).__init__(name, act=act)
+        self.n_filter = n_filter
+        self.filter_size = filter_size
+        self._strides = self.strides = strides
+        self.padding = padding
+        self.data_format = data_format
+        self._dilation_rate = self.dilation_rate = dilation_rate
+        self.W_init = W_init
+        self.b_init = b_init
+        self.in_channels = in_channels
+
+        if self.in_channels:
+            self.build(None)
+            self._built = True
+
+        logging.info(
+            "DeConv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % (
+                self.name, n_filter, str(filter_size), str(strides), padding,
+                self.act.__class__.__name__ if self.act is not None else 'No Activation'
+            )
+        )
+
+    def __repr__(self):
+        actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
+        s = (
+            '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}'
+            ', strides={strides}, padding={padding}'
+        )
+        if self.dilation_rate != (1, ) * len(self.dilation_rate):
+            s += ', dilation={dilation_rate}'
+        if self.b_init is None:
+            s += ', bias=False'
+        s += (', ' + actstr)
+        if self.name is not None:
+            s += ', name=\'{name}\''
+        s += ')'
+        return s.format(classname=self.__class__.__name__, **self.__dict__)
+
+    def build(self, inputs_shape):
+        if self.data_format == 'channels_last':
+            self.data_format = 'NHWC'
+            if self.in_channels is None:
+                self.in_channels = inputs_shape[-1]
+            self._strides = [1, self._strides[0], self._strides[1], 1]
+            self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], 1]
+        elif self.data_format == 'channels_first':
+            self.data_format = 'NCHW'
+            if self.in_channels is None:
+                self.in_channels = inputs_shape[1]
+            self._strides = [1, 1, self._strides[0], self._strides[1]]
+            self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1]]
+        else:
+            raise Exception("data_format should be either channels_last or channels_first")
+
+        # TODO: channels first filter shape [out_channel, in_channel, filter_h, filter_w]
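+        # Note: for a transposed convolution the kernel layout swaps the channel
+        # axes relative to a forward convolution, i.e. (filter_h, filter_w,
+        # out_channels, in_channels) instead of (filter_h, filter_w, in_channels, out_channels).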
+        self.filter_shape = (self.filter_size[0], self.filter_size[1], self.n_filter, self.in_channels)
+        self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init)
+
+        self.b_init_flag = False
+        if self.b_init:
+            self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init)
+            self.bias_add = tl.ops.BiasAdd(self.data_format)
+            self.b_init_flag = True
+
+        self.conv2d_transpose = tl.ops.Conv2d_transpose(
+            strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate,
+            out_channel=self.n_filter, k_size=(self.filter_size[0], self.filter_size[1]), in_channels=self.in_channels
+        )
+
+        self.act_init_flag = False
+        if self.act:
+            self.act_init_flag = True
+
+    def forward(self, inputs):
+        outputs = self.conv2d_transpose(inputs, self.W)
+        if self.b_init_flag:
+            outputs = self.bias_add(outputs, self.b)
+        if self.act_init_flag:
+            outputs = self.act(outputs)
+        if tl.BACKEND == 'mindspore' and self.data_format == 'NHWC':
+            outputs = tl.nchw_to_nhwc(outputs)
+        return outputs
+
+
+class DeConv3d(Module):
+    """Simplified version of :class:`Deconv3dLayer`.
+
+    Parameters
+    ----------
+    n_filter : int
+        The number of filters.
+    filter_size : tuple of int
+        The filter size (depth, height, width).
+    strides : tuple of int
+        The sliding window strides of corresponding input dimensions.
+    dilation_rate : tuple of int
+        Specifying the dilation rate to use for dilated convolution.
+    act : activation function
+        The activation function of this layer.
+    padding : str
+        The padding algorithm type: "SAME" or "VALID".
+    data_format : str
+        "channels_last" (NDHWC, default) or "channels_first" (NCDHW).
+    W_init : initializer
+        The initializer for the weight matrix.
+    b_init : initializer or None
+        The initializer for the bias vector. If None, skip biases.
+    in_channels : int
+        The number of in channels.
+    name : None or str
+        A unique layer name.
+
+    Examples
+    --------
+    With TensorLayer
+
+    >>> net = tl.layers.Input([8, 20, 20, 20, 3], name='input')
+    >>> deconv3d = tl.layers.DeConv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), b_init=None, in_channels=3, name='deconv3d_1')
+    >>> print(deconv3d)
+    >>> tensor = tl.layers.DeConv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), act=tl.ops.relu, name='deconv3d_2')(net)
+    >>> print(tensor)
+
+    """
+
+    def __init__(
+        self,
+        n_filter=32,
+        filter_size=(3, 3, 3),
+        strides=(1, 1, 1),
+        act=None,
+        padding='SAME',
+        data_format='channels_last',
+        dilation_rate=(1, 1, 1),
+        W_init=tl.initializers.truncated_normal(stddev=0.02),
+        b_init=tl.initializers.constant(value=0.0),
+        in_channels=None,
+        name=None  # 'deconv3d',
+    ):
+        super(DeConv3d, self).__init__(name, act=act)
+        self.n_filter = n_filter
+        self.filter_size = filter_size
+        self._strides = self.strides = strides
+        self.padding = padding
+        self.data_format = data_format
+        self._dilation_rate = self.dilation_rate = dilation_rate
+        self.W_init = W_init
+        self.b_init = b_init
+        self.in_channels = in_channels
+
+        if self.in_channels:
+            self.build(None)
+            self._built = True
+
+        logging.info(
+            "DeConv3d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % (
+                self.name, n_filter, str(filter_size), str(strides), padding,
+                self.act.__class__.__name__ if self.act is not None else 'No Activation'
+            )
+        )
+
+    def __repr__(self):
+        actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
+        s = (
+            '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}'
+            ', strides={strides}, padding={padding}'
+        )
+        if self.dilation_rate != (1, ) * len(self.dilation_rate):
+            s += ', dilation={dilation_rate}'
+        if self.b_init is None:
+            s += ', bias=False'
+        s += (', ' + actstr)
+        if self.name is not None:
+            s += ', name=\'{name}\''
+        s += ')'
+        return s.format(classname=self.__class__.__name__, **self.__dict__)
+
+    def build(self, inputs_shape):
+        if self.data_format == 'channels_last':
+            self.data_format = 'NDHWC'
+            if self.in_channels is None:
+                self.in_channels = inputs_shape[-1]
+            self._strides = [1, self._strides[0], self._strides[1], self._strides[2], 1]
+            self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], self._dilation_rate[2], 1]
+        elif self.data_format == 'channels_first':
+            self.data_format = 'NCDHW'
+            if self.in_channels is None:
+                self.in_channels = inputs_shape[1]
+            self._strides = [1, 1, self._strides[0], self._strides[1], self._strides[2]]
+            self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1], self._dilation_rate[2]]
+        else:
+            raise Exception("data_format should be either channels_last or channels_first")
+
+        self.filter_shape = (
+            self.filter_size[0], self.filter_size[1], self.filter_size[2], self.n_filter, self.in_channels
+        )
+
+        self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init)
+
+        self.b_init_flag = False
+        if self.b_init:
+            self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init)
+            self.bias_add = tl.ops.BiasAdd(self.data_format)
+            self.b_init_flag = True
+
+        self.conv3d_transpose = tl.ops.Conv3d_transpose(
+            strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate,
+            out_channel=self.n_filter, k_size=(self.filter_size[0], self.filter_size[1], self.filter_size[2])
+        )
+
+        self.act_init_flag = False
+        if self.act:
+            self.activate = self.act
+            self.act_init_flag = True
+
+    def forward(self, inputs):
+        outputs = self.conv3d_transpose(inputs, self.W)
+        if self.b_init_flag:
+            outputs = self.bias_add(outputs, self.b)
+        if self.act_init_flag:
+            outputs = self.activate(outputs)
+        if tl.BACKEND == 'mindspore' and self.data_format == 'NDHWC':
+            outputs = tl.nchw_to_nhwc(outputs)
+        return outputs
diff --git a/tensorlayer/models/core.py b/tensorlayer/models/core.py
index 2e4c640c5..15b54395b 100644
--- a/tensorlayer/models/core.py
+++ b/tensorlayer/models/core.py
@@ -325,8 +325,6 @@ def ms_train(
         start_time = time.time()
         train_loss, train_acc, n_iter = 0, 0, 0
         for X_batch, y_batch in train_dataset:
-            X_batch = ms.Tensor(X_batch.numpy(), dtype=ms.float32)
-            y_batch = ms.Tensor(y_batch.numpy(), dtype=ms.int32)
             output = network(X_batch)
             loss_output = loss_fn(output, y_batch)
             grads = train_network(X_batch, y_batch)

From 822996e75c5b3c01907eaf70df1c7b577de4cf70 Mon Sep 17 00:00:00 2001
From: Eric Lai
Date: Thu, 17 Dec 2020 11:28:18 +0800
Subject: [PATCH 03/36] yapf

---
 tensorlayer/backend/ops/load_backend.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tensorlayer/backend/ops/load_backend.py b/tensorlayer/backend/ops/load_backend.py
index ed4e48062..93b7c59c7 100644
--- a/tensorlayer/backend/ops/load_backend.py
+++ b/tensorlayer/backend/ops/load_backend.py
@@ -57,8 +57,8 @@
     import mindspore.context as context
     import os
     os.environ['DEVICE_ID'] = '0'
-    #context.set_context(mode=context.PYNATIVE_MODE,device_target='GPU'),
-    context.set_context(mode=context.GRAPH_MODE, device_target='CPU'),
+    context.set_context(mode=context.PYNATIVE_MODE,device_target='GPU'),
+    # context.set_context(mode=context.GRAPH_MODE, device_target='CPU'),
     # enable_task_sink=True, enable_loop_sink=True)
     # context.set_context(mode=context.GRAPH_MODE, backend_policy='ms',
     #                     device_target='Ascend', enable_task_sink=True, enable_loop_sink=True)

From dd9770d9ef5e324b737e4b1230d058b84174fd46 Mon Sep 17 00:00:00 2001
From: Eric Lai
Date: Mon, 19 Apr 2021 11:28:30 +0800
Subject: [PATCH 04/36] update tl

---
 tensorlayer/__init__.py | 2 -
 tensorlayer/activation.py | 346 -----
 tensorlayer/backend/__init__.py | 1 +
 tensorlayer/backend/ops/__init__.py | 8 +
 tensorlayer/backend/ops/dragon_backend.py | 33 +
 tensorlayer/backend/ops/load_backend.py | 4 +-
 tensorlayer/backend/ops/mindspore_backend.py | 154 +-
 tensorlayer/backend/ops/mindspore_nn.py | 15 +-
 tensorlayer/backend/ops/tensorflow_backend.py | 60 +-
 tensorlayer/backend/ops/tensorflow_nn.py | 6 +-
 tensorlayer/dataflow/mindspore_data.py | 3 -
 tensorlayer/files/utils.py | 1 +
 tensorlayer/layers/__init__.py | 2 +-
 tensorlayer/layers/activation.py | 313 +++-
 tensorlayer/layers/convolution/__init__.py | 4 +-
 .../layers/convolution/deformable_conv.py | 324 +++++
 .../layers/convolution/depthwise_conv.py | 6 +
 .../layers/convolution/simplified_conv.py | 50 +-
 tensorlayer/layers/core/common.py | 140 ++
 tensorlayer/layers/core/core_mindspore.py | 378 +++--
 .../layers/core/core_tensorflow_dragon.py | 512 ++++---
 tensorlayer/layers/dense/__init__.py | 16 +-
 tensorlayer/layers/dense/base_dense.py | 6 +
 tensorlayer/layers/dense/binary_dense.py | 109 ++
 tensorlayer/layers/dense/dorefa_dense.py | 116 ++
 tensorlayer/layers/dense/dropconnect.py | 10 +-
 tensorlayer/layers/dense/quan_dense.py | 5 +
 tensorlayer/layers/dense/quan_dense_bn.py | 188 +++
 tensorlayer/layers/dense/ternary_dense.py | 109 ++
 tensorlayer/layers/dropout.py | 10 -
 tensorlayer/layers/embedding.py | 19 +-
tensorlayer/layers/image_resampling.py | 33 +- tensorlayer/layers/inputs.py | 4 +- tensorlayer/layers/lambda_layers.py | 280 ++++ tensorlayer/layers/merge.py | 27 +- tensorlayer/layers/noise.py | 9 +- tensorlayer/layers/normalization.py | 23 +- tensorlayer/layers/padding.py | 8 +- tensorlayer/layers/pooling.py | 149 +- tensorlayer/layers/recurrent.py | 1264 +++++++++++++++++ tensorlayer/layers/utils.py | 11 + tensorlayer/optimizers/mindspore_optimizer.py | 6 +- tensorlayer/prepro.py | 7 +- 43 files changed, 3772 insertions(+), 999 deletions(-) delete mode 100644 tensorlayer/activation.py create mode 100644 tensorlayer/layers/convolution/deformable_conv.py create mode 100644 tensorlayer/layers/dense/binary_dense.py create mode 100644 tensorlayer/layers/dense/dorefa_dense.py create mode 100644 tensorlayer/layers/dense/quan_dense_bn.py create mode 100644 tensorlayer/layers/dense/ternary_dense.py create mode 100644 tensorlayer/layers/lambda_layers.py create mode 100644 tensorlayer/layers/recurrent.py diff --git a/tensorlayer/__init__.py b/tensorlayer/__init__.py index 7d543b20a..b111a3edb 100644 --- a/tensorlayer/__init__.py +++ b/tensorlayer/__init__.py @@ -36,7 +36,6 @@ " - `pip install --upgrade tensorflow-gpu`" ) - from tensorlayer import activation from tensorlayer import array_ops from tensorlayer import cost from tensorlayer import decorators @@ -63,7 +62,6 @@ visualize = LazyImport("tensorlayer.visualize") # alias - act = activation vis = visualize alphas = array_ops.alphas diff --git a/tensorlayer/activation.py b/tensorlayer/activation.py deleted file mode 100644 index fcdd52fe1..000000000 --- a/tensorlayer/activation.py +++ /dev/null @@ -1,346 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- -"""A file containing various activation functions.""" - -import tensorflow as tf - -from tensorlayer.decorators import deprecated - -__all__ = [ - 'leaky_relu', - 'leaky_relu6', - 'leaky_twice_relu6', - 'lrelu', - 'lrelu6', - 'ltrelu6', - 'ramp', - 'swish', - 'sign', - 'htanh', - 'hard_tanh', - 'pixel_wise_softmax', -] - - -def ramp(x, v_min=0, v_max=1, name=None): - """Ramp activation function. - - Reference: [tf.clip_by_value] - - Parameters - ---------- - x : Tensor - input. - v_min : float - cap input to v_min as a lower bound. - v_max : float - cap input to v_max as a upper bound. - name : str - The function name (optional). - - Returns - ------- - Tensor - A ``Tensor`` in the same type as ``x``. - - """ - return tf.clip_by_value(x, clip_value_min=v_min, clip_value_max=v_max, name=name) - - -# @deprecated(date="2018-09-30", instructions="This API is deprecated. Please use as `tf.ops.leaky_relu`") -def leaky_relu(x, alpha=0.2, name="leaky_relu"): - """leaky_relu can be used through its shortcut: :func:`tl.act.lrelu`. - - This function is a modified version of ReLU, introducing a nonzero gradient for negative input. Introduced by the paper: - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__ - - The function return the following results: - - When x < 0: ``f(x) = alpha_low * x``. - - When x >= 0: ``f(x) = x``. - - Parameters - ---------- - x : Tensor - Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``. - alpha : float - Slope. - name : str - The function name (optional). 
- - Examples - -------- - >>> import tensorlayer as tl - >>> net = tl.layers.Input([10, 200]) - >>> net = tl.layers.Dense(n_units=100, act=lambda x : tl.act.lrelu(x, 0.2), name='dense')(net) - - Returns - ------- - Tensor - A ``Tensor`` in the same type as ``x``. - - References - ---------- - - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__ - - """ - if not (0 < alpha <= 1): - raise ValueError("`alpha` value must be in [0, 1]`") - - with tf.name_scope(name) as name_scope: - x = tf.convert_to_tensor(x, name="features") - return tf.maximum(x, alpha * x, name=name_scope) - - -def leaky_relu6(x, alpha=0.2, name="leaky_relu6"): - """:func:`leaky_relu6` can be used through its shortcut: :func:`tl.act.lrelu6`. - - This activation function is a modified version :func:`leaky_relu` introduced by the following paper: - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__ - - This activation function also follows the behaviour of the activation function :func:`tf.ops.relu6` introduced by the following paper: - `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] `__ - - The function return the following results: - - When x < 0: ``f(x) = alpha_low * x``. - - When x in [0, 6]: ``f(x) = x``. - - When x > 6: ``f(x) = 6``. - - Parameters - ---------- - x : Tensor - Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``. - alpha : float - Slope. - name : str - The function name (optional). - - Examples - -------- - >>> import tensorlayer as tl - >>> net = tl.layers.Input([10, 200]) - >>> net = tl.layers.Dense(n_units=100, act=lambda x : tl.act.leaky_relu6(x, 0.2), name='dense')(net) - - Returns - ------- - Tensor - A ``Tensor`` in the same type as ``x``. - - References - ---------- - - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__ - - `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] `__ - """ - if not isinstance(alpha, tf.Tensor) and not (0 < alpha <= 1): - raise ValueError("`alpha` value must be in [0, 1]`") - - with tf.name_scope(name) as name_scope: - x = tf.convert_to_tensor(x, name="features") - return tf.minimum(tf.maximum(x, alpha * x), 6, name=name_scope) - - -def leaky_twice_relu6(x, alpha_low=0.2, alpha_high=0.2, name="leaky_relu6"): - """:func:`leaky_twice_relu6` can be used through its shortcut: :func:`:func:`tl.act.ltrelu6`. - - This activation function is a modified version :func:`leaky_relu` introduced by the following paper: - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__ - - This activation function also follows the behaviour of the activation function :func:`tf.ops.relu6` introduced by the following paper: - `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] `__ - - This function push further the logic by adding `leaky` behaviour both below zero and above six. - - The function return the following results: - - When x < 0: ``f(x) = alpha_low * x``. - - When x in [0, 6]: ``f(x) = x``. - - When x > 6: ``f(x) = 6 + (alpha_high * (x-6))``. - - Parameters - ---------- - x : Tensor - Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``. - alpha_low : float - Slope for x < 0: ``f(x) = alpha_low * x``. - alpha_high : float - Slope for x < 6: ``f(x) = 6 (alpha_high * (x-6))``. - name : str - The function name (optional). 
- - Examples - -------- - >>> import tensorlayer as tl - >>> net = tl.layers.Input([10, 200]) - >>> net = tl.layers.Dense(n_units=100, act=lambda x : tl.act.leaky_twice_relu6(x, 0.2, 0.2), name='dense')(net) - - Returns - ------- - Tensor - A ``Tensor`` in the same type as ``x``. - - References - ---------- - - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__ - - `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] `__ - - """ - if not isinstance(alpha_high, tf.Tensor) and not (0 < alpha_high <= 1): - raise ValueError("`alpha_high` value must be in [0, 1]`") - - if not isinstance(alpha_low, tf.Tensor) and not (0 < alpha_low <= 1): - raise ValueError("`alpha_low` value must be in [0, 1]`") - - with tf.name_scope(name) as name_scope: - x = tf.convert_to_tensor(x, name="features") - - x_is_above_0 = tf.minimum(x, 6 * (1 - alpha_high) + alpha_high * x) - x_is_below_0 = tf.minimum(alpha_low * x, 0) - - return tf.maximum(x_is_above_0, x_is_below_0, name=name_scope) - - -def swish(x, name='swish'): - """Swish function. - - See `Swish: a Self-Gated Activation Function `__. - - Parameters - ---------- - x : Tensor - input. - name: str - function name (optional). - - Returns - ------- - Tensor - A ``Tensor`` in the same type as ``x``. - - """ - # TODO: in this case, the beta = 1, but the beta can either be a constant or a trainable parameter - with tf.name_scope(name): - x = tf.nn.sigmoid(x) * x - return x - - -# @tf.RegisterGradient("QuantizeGrad") -# def _sign_grad(unused_op, grad): -# return tf.clip_by_value(grad, -1, 1) - - -@tf.custom_gradient -def sign(x): - """Sign function. - - Clip and binarize tensor using the straight through estimator (STE) for the gradient, usually be used for - quantizing values in `Binarized Neural Networks`: https://arxiv.org/abs/1602.02830. - - Parameters - ---------- - x : Tensor - input. - - Returns - ------- - Tensor - A ``Tensor`` in the same type as ``x``. - - References - ---------- - - `Rectifier Nonlinearities Improve Neural Network Acoustic Models, Maas et al. (2013)` - http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf - - - `BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. (2016)` - https://arxiv.org/abs/1602.02830 - - """ - - def grad(dy): - return tf.clip_by_value(dy, -1, 1) - - return tf.sign(x, name='sign'), grad - - -# if tf.__version__ > "1.7": -# @tf.custom_gradient -# def sign(x): # https://www.tensorflow.org/versions/master/api_docs/python/tf/custom_gradient?hl=ES#top_of_page -# """Differentiable sign function using sigmoid as the derivation function, -# see `tf.sign `__ and `tf.custom_gradient -# `__. -# -# Parameters -# ---------- -# x : Tensor -# input. -# -# Returns -# ------- -# Tensor -# A ``Tensor`` in the same type as ``x``. -# -# """ -# tao = tf.ops.sigmoid(x) -# def grad(): -# return tao * (1 - tao) -# return tf.sign(x), grad - - -def hard_tanh(x, name='htanh'): - """Hard tanh activation function. - - Which is a ramp function with low bound of -1 and upper bound of 1, shortcut is `htanh`. - - Parameters - ---------- - x : Tensor - input. - name : str - The function name (optional). - - Returns - ------- - Tensor - A ``Tensor`` in the same type as ``x``. 
- - """ - # with tf.variable_scope("hard_tanh"): - return tf.clip_by_value(x, -1, 1, name=name) - - -@deprecated(date="2018-06-30", instructions="This API will be deprecated soon as tf.ops.softmax can do the same thing") -def pixel_wise_softmax(x, name='pixel_wise_softmax'): - """Return the softmax outputs of images, every pixels have multiple label, the sum of a pixel is 1. - - Usually be used for image segmentation. - - Parameters - ---------- - x : Tensor - input. - - For 2d image, 4D tensor (batch_size, height, weight, channel), where channel >= 2. - - For 3d image, 5D tensor (batch_size, depth, height, weight, channel), where channel >= 2. - name : str - function name (optional) - - Returns - ------- - Tensor - A ``Tensor`` in the same type as ``x``. - - Examples - -------- - >>> outputs = pixel_wise_softmax(network.outputs) - >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5) - - References - ---------- - - `tf.reverse `__ - - """ - with tf.name_scope(name): - return tf.nn.softmax(x) - - -# Alias -lrelu = leaky_relu -lrelu6 = leaky_relu6 -ltrelu6 = leaky_twice_relu6 -htanh = hard_tanh diff --git a/tensorlayer/backend/__init__.py b/tensorlayer/backend/__init__.py index 9167b0131..01e5c8376 100644 --- a/tensorlayer/backend/__init__.py +++ b/tensorlayer/backend/__init__.py @@ -3,3 +3,4 @@ # load ops from .ops import * +from tensorlayer.backend import ops \ No newline at end of file diff --git a/tensorlayer/backend/ops/__init__.py b/tensorlayer/backend/ops/__init__.py index 53ce221e5..4d50f0f50 100644 --- a/tensorlayer/backend/ops/__init__.py +++ b/tensorlayer/backend/ops/__init__.py @@ -80,6 +80,10 @@ from .load_backend import linspace from .load_backend import slice from .load_backend import add_n +from .load_backend import ceil +from .load_backend import multiply +from .load_backend import divide +from .load_backend import identity # dtype from .load_backend import (DType, float16, float32, float64, int8, int16, int32, int64, uint8, uint16, uint32, uint64) @@ -113,4 +117,8 @@ from .load_backend import Unstack from .load_backend import Sign from .load_backend import Resize +from .load_backend import Pad +from .load_backend import Minimum +from .load_backend import Maximum +from .load_backend import Meshgrid diff --git a/tensorlayer/backend/ops/dragon_backend.py b/tensorlayer/backend/ops/dragon_backend.py index 821c63996..b7c36c0dd 100644 --- a/tensorlayer/backend/ops/dragon_backend.py +++ b/tensorlayer/backend/ops/dragon_backend.py @@ -548,6 +548,18 @@ def reduce_min(input_tensor, axis=None): """ return D.min(input_tensor, axis) +class Pad(object): + def __init__(self, paddings, mode="REFLECT"): + if mode not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']: + raise Exception("Unsupported mode: {}".format(mode)) + if mode == 'SYMMETRIC': + mode = 'EDGE' + self.paddings = paddings + self.mode = mode + + def __call__(self, x): + outputs = D.pad(x, pads=self.paddings, mode=self.mode, value=0) + return outputs def pad(tensor, paddings, mode='CONSTANT', constant_values=0): """ @@ -614,6 +626,15 @@ def stack(values, axis=0): return D.stack(values, axis=axis) +class Meshgrid(object): + def __init__(self, indexing='xy'): + super(Meshgrid, self).__init__() + self.index = indexing + + def __call__(self, inputs): + pass + + def meshgrid(x, y): """ Broadcasts parameters for evaluation on an N-D grid. 
@@ -988,3 +1009,15 @@ def __init__(self): def __call__(self, x): return D.math.sign(x) + +def ceil(x): + raise NotImplementedError + +def multiply(x, y): + raise NotImplementedError + +def divide(x, y): + raise NotImplementedError + +def identity(x): + raise NotImplementedError \ No newline at end of file diff --git a/tensorlayer/backend/ops/load_backend.py b/tensorlayer/backend/ops/load_backend.py index 93b7c59c7..ed4e48062 100644 --- a/tensorlayer/backend/ops/load_backend.py +++ b/tensorlayer/backend/ops/load_backend.py @@ -57,8 +57,8 @@ import mindspore.context as context import os os.environ['DEVICE_ID'] = '0' - context.set_context(mode=context.PYNATIVE_MODE,device_target='GPU'), - # context.set_context(mode=context.GRAPH_MODE, device_target='CPU'), + #context.set_context(mode=context.PYNATIVE_MODE,device_target='GPU'), + context.set_context(mode=context.GRAPH_MODE, device_target='CPU'), # enable_task_sink=True, enable_loop_sink=True) # context.set_context(mode=context.GRAPH_MODE, backend_policy='ms', # device_target='Ascend', enable_task_sink=True, enable_loop_sink=True) diff --git a/tensorlayer/backend/ops/mindspore_backend.py b/tensorlayer/backend/ops/mindspore_backend.py index 6e26403ce..8ebc6f120 100644 --- a/tensorlayer/backend/ops/mindspore_backend.py +++ b/tensorlayer/backend/ops/mindspore_backend.py @@ -14,6 +14,7 @@ from mindspore._c_expression import Tensor as Tensor_ from mindspore.ops import operations as P from mindspore.ops import functional as F +from mindspore.ops import composite as C import mindspore.context as context from mindspore.nn import Cell @@ -219,6 +220,17 @@ def _initialize(self, arr): _assignment(arr, tmp) +class RandomNormal(Cell): + def __init__(self, mean=0.0, stddev=0.01, seed=None): + super(RandomNormal, self).__init__() + self.normal = Normal(mean=mean, stddev=stddev, seed=seed) + + def construct(self, shape): + arr = np.ndarray(shape) + outputs = self.normal(arr) + return outputs + + def random_normal(shape, mean=0.0, stddev=1.0, dtype=mstype.float32, seed=None): """ Outputs random values from a normal distribution. @@ -440,6 +452,26 @@ def dtypes(dt): return _dtypeDict[dt] +class Maximum(Cell): + + def __init__(self): + super(Maximum, self).__init__() + self.maximum = P.Maximum() + + def construct(self, x, y): + return self.maximum(x, y) + + +class Minimum(Cell): + + def __init__(self): + super(Minimum, self).__init__() + self.minimum = P.Minimum() + + def construct(self, x, y): + return self.minimum(x, y) + + def minimum(x, y): """ Returns the min of x and y (i.e. x < y ? x : y) element-wise. @@ -679,6 +711,16 @@ def reduce_min(input_tensor, axis=None): outputs = Rmin_obj(input_tensor, axis) return outputs +class Pad(Cell): + def __init__(self, paddings, mode="REFLECT"): + super(Pad, self).__init__() + if mode not in ["REFLECT", "SYMMETRIC"]: + raise Exception("Unsupported mode: {}".format(mode)) + self.pad = P.MirrorPad(mode=mode) + self.paddings = Tensor(paddings) + + def construct(self, x): + return self.pad(x, self.paddings) def pad(tensor, paddings, mode='CONSTANT', constant_values=0): """ @@ -699,27 +741,27 @@ def pad(tensor, paddings, mode='CONSTANT', constant_values=0): ------- A Tensor. Has the same type as tensor. 
""" - # todo: constant value , padding mode - pass + raise NotImplementedError class Unstack(Cell): - def __init__(self, axis, num=None): - self.axis = axis - self.num = num + super(Unstack, self).__init__() + if num is not None: + raise ("The num Parameters do not need to be set.") + self.unstack = P.Unpack(axis=axis) - def __call__(self, values): - raise NotImplementedError + def construct(self, values): + return self.unstack(values) class Stack(Cell): + def __init__(self, axis=0): + super(Stack, self).__init__() + self.stack = P.Pack(axis=axis) - def __init__(self, axis): - self.axis = axis - - def __call__(self, values): - raise NotImplementedError + def construct(self, values): + return self.stack(values) def stack(values, axis=0): @@ -738,11 +780,22 @@ def stack(values, axis=0): ------- A stacked Tensor with the same type as values. """ - # todo Not Implemented - raise NotImplementedError + _stack = P.Pack(axis=axis) + return _stack(values) -def meshgrid(x, y): +class Meshgrid(Cell): + def __init__(self, indexing='xy'): + super(Meshgrid, self).__init__() + self._meshgrid = P.Meshgrid(indexing=indexing) + + def construct(self, *args): + inputs = tuple(*args) + return self._meshgrid(inputs) + + + +def meshgrid(*args, **kwargs): """ Broadcasts parameters for evaluation on an N-D grid. @@ -758,7 +811,9 @@ def meshgrid(x, y): A list of N Tensors with rank N. """ - pass + _meshgrid = P.Meshgrid(**kwargs) + return _meshgrid(*args) + def range(start, limit=None, delta=1, dtype=None): @@ -915,7 +970,7 @@ def transpose(a, perm=None, conjugate=False): ------- A transposed Tensor. """ - # todo None conjugate + # TODO conjugate trans_obj = P.Transpose() outputs = trans_obj(a, perm) print(outputs) @@ -959,8 +1014,10 @@ def clip_by_value(t, clip_value_min, clip_value_max): ------- A clipped Tensor or IndexedSlices. """ - - pass + min_value = Tensor(clip_value_min, mstype.float32) + max_value = Tensor(clip_value_max, mstype.float32) + output = C.clip_by_value(t, min_value, max_value) + return output def split(value, num_or_size_splits, axis=0, num=None): @@ -1074,12 +1131,15 @@ def __init__(self, scale, method, antialias=False, data_format='channels_last', if method not in ['nearest', 'bilinear']: raise ('The method must be "nearest" or "bilinear".') self.method = method + + if ksize is None: + raise ('The "bilinear" and "nearest" method must enter ksize. The dimension of size must be 2 (H, W).') + + out_seize = (int(ksize[0] * scale[0]), int(ksize[1] * scale[1])) if self.method == 'nearest': - self.resize = P.ResizeNearestNeighbor(size=tuple(scale), align_corners=antialias) + self.resize = P.ResizeNearestNeighbor(size=out_seize, align_corners=antialias) elif self.method == 'bilinear': - if ksize is None: - raise ('The bilinear method must enter ksize. The dimension of size must be 2 (H, W).') - out_seize = (int(ksize[0] * scale[0]), int(ksize[1] * scale[1])) + self.resize = P.ResizeBilinear(size=out_seize) def construct(self, inputs): @@ -1096,37 +1156,59 @@ def resize(inputs, output_size, method, antialias): class ZeroPadding1D(Cell): - - def __init__(self): + def __init__(self, padding): super(ZeroPadding1D, self).__init__() + if np.size(padding) == 2: + self.pad = P.Pad(paddings=padding) + else: + raise ("The shape of parameter paddings is (N, 2). 

-    def construct(self, *inputs, **kwargs):
-        raise NotImplementedError
+    def construct(self, inputs):
+        return self.pad(inputs)


 class ZeroPadding2D(Cell):
-
-    def __init__(self):
+    def __init__(self, padding):
         super(ZeroPadding2D, self).__init__()
+        if np.size(padding) == 4:
+            self.pad = P.Pad(paddings=padding)
+        else:
+            raise ValueError("The shape of parameter paddings is (N, 2). N is the rank of input data.")

-    def construct(self, *inputs, **kwargs):
-        raise NotImplementedError
+    def construct(self, inputs):
+        return self.pad(inputs)


 class ZeroPadding3D(Cell):
-
-    def __init__(self):
+    def __init__(self, padding):
         super(ZeroPadding3D, self).__init__()
+        if np.size(padding) == 6:
+            self.pad = P.Pad(paddings=padding)
+        else:
+            raise ValueError("The shape of parameter paddings is (N, 2). N is the rank of input data.")

-    def construct(self, *inputs, **kwargs):
-        raise NotImplementedError
+    def construct(self, inputs):
+        return self.pad(inputs)


 class Sign(Cell):

     def __init__(self):
-        super(Sign).__init__()
+        super(Sign, self).__init__()
         self.sign = P.Sign()

     def construct(self, x):
         return self.sign(x)
+
+def ceil(x):
+    _ceil = P.Ceil()
+    return _ceil(x)
+
+def multiply(x, y):
+    raise NotImplementedError
+
+def divide(x, y):
+    raise NotImplementedError
+
+def identity(x):
+    raise NotImplementedError
\ No newline at end of file
diff --git a/tensorlayer/backend/ops/mindspore_nn.py b/tensorlayer/backend/ops/mindspore_nn.py
index 95c0ed156..944b10cb4 100644
--- a/tensorlayer/backend/ops/mindspore_nn.py
+++ b/tensorlayer/backend/ops/mindspore_nn.py
@@ -240,7 +240,7 @@ def construct(self, x):
         return self.leakyrelu(x)


-def leaky_relu(x):
+def leaky_relu(x, alpha=0.2):
     """
     Compute the Leaky ReLU activation function.

@@ -255,7 +255,9 @@ def leaky_relu(x):
     The activation value.

     """
-    pass
+    leaky_relu = LeakyReLU(alpha=alpha)
+    output = leaky_relu(x)
+    return output


 class Softplus(Cell):
@@ -282,7 +284,8 @@ def softplus(x):
     A Tensor. Has the same type as features.

     """
-    pass
+    obj = Softplus()
+    return obj(x)


 class Tanh(Cell):
@@ -309,7 +312,8 @@ def tanh(x):
     A Tensor. Has the same type as x.

     """
-    pass
+    _tanh = Tanh()
+    return _tanh(x)


 class Sigmoid(Cell):
@@ -476,7 +480,6 @@ def __init__(self, stride, padding, data_format='NWC', dilations=None, out_chann

         self.expand_dims = P.ExpandDims()
         self.squeeze = P.Squeeze(2)
-        self.shape = P.Shape()

     def construct(self, x, filters):
         if self.data_format == 'NWC':
@@ -488,6 +491,8 @@ def construct(self, x, filters):

         output = self.conv2d(x, filters)
         output = self.squeeze(output)
+        if self.data_format == 'NWC':
+            output = nchw_to_nhwc(output)
         return output


diff --git a/tensorlayer/backend/ops/tensorflow_backend.py b/tensorlayer/backend/ops/tensorflow_backend.py
index 91ce8343f..d22513042 100644
--- a/tensorlayer/backend/ops/tensorflow_backend.py
+++ b/tensorlayer/backend/ops/tensorflow_backend.py
@@ -290,6 +290,22 @@ def dtypes(dt):
     return _dtypeDict[dt]


+class Maximum(object):
+    def __init__(self):
+        pass
+
+    def __call__(self, x, y):
+        return tf.maximum(x=x, y=y)
+
+
+class Minimum(object):
+    def __init__(self):
+        pass
+
+    def __call__(self, x, y):
+        return tf.minimum(x=x, y=y)
+
+
 def minimum(x, y):
     """
     Returns the min of x and y (i.e. x < y ? x : y) element-wise.
@@ -414,7 +430,7 @@ def sqrt(x):


 class ReduceSum(object):

-    def __init__(self, axis):
+    def __init__(self, axis=None):
         self.axis = axis

     def __call__(self, input):
@@ -439,7 +455,7 @@ def reduce_mean(input_tensor, axis=None):
     ----------
     input_tensor : tensor
         The tensor to reduce. Should have numeric type.
-    axis : int
+    axis : list
         The dimensions to reduce. If None (the default), reduces all dimensions. Must be in the range [-rank(input_tensor), rank(input_tensor)).
     name : str
@@ -507,6 +523,17 @@ def reduce_min(input_tensor, axis=None):
     return tf.reduce_min(input_tensor, axis=axis)


+class Pad(object):
+    def __init__(self, paddings, mode="REFLECT"):
+        if mode not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']:
+            raise Exception("Unsupported mode: {}".format(mode))
+        self.paddings = paddings
+        self.mode = mode
+
+    def __call__(self, x):
+        outputs = tf.pad(x, self.paddings, mode=self.mode, constant_values=0)
+        return outputs
+
 def pad(tensor, paddings, mode='CONSTANT', constant_values=0):
     """
     Pads a tensor.
@@ -545,7 +572,7 @@ def __call__(self, values):


 class Stack(object):

-    def __init__(self, axis):
+    def __init__(self, axis=0):
         self.axis = axis

     def __call__(self, values):
@@ -572,7 +599,16 @@ def stack(values, axis=0):
     return tf.stack(values, axis=axis)


-def meshgrid(x, y):
+class Meshgrid(object):
+    def __init__(self, indexing='xy'):
+        super(Meshgrid, self).__init__()
+        self.index = indexing
+
+    def __call__(self, inputs):
+        return tf.meshgrid(*inputs, indexing=self.index)
+
+
+def meshgrid(*args, **kwargs):
     """
     Broadcasts parameters for evaluation on an N-D grid.

@@ -588,7 +624,7 @@ def meshgrid(x, y):
     A list of N Tensors with rank N.

     """
-    return tf.meshgrid(x, y)
+    return tf.meshgrid(*args, **kwargs)


 def range(start, limit=None, delta=1, dtype=None):
@@ -725,7 +761,7 @@ def transpose(a, perm=None, conjugate=False):
     ----------
     a : tensor
         A Tensor.
-    perm : int
+    perm : list / int
         A permutation of the dimensions of a.
     conjugate : bool
         Setting it to True is mathematically equivalent to tf.math.conj(tf.transpose(input)).
@@ -955,3 +991,15 @@ def __init__(self):

     def __call__(self, x):
         return tf.sign(x)
+
+def ceil(x):
+    return tf.math.ceil(x)
+
+def multiply(x, y):
+    return tf.multiply(x, y)
+
+def divide(x, y):
+    return tf.divide(x, y)
+
+def identity(x):
+    return tf.identity(x)
\ No newline at end of file
diff --git a/tensorlayer/backend/ops/tensorflow_nn.py b/tensorlayer/backend/ops/tensorflow_nn.py
index 2fa178d04..71d978f5d 100644
--- a/tensorlayer/backend/ops/tensorflow_nn.py
+++ b/tensorlayer/backend/ops/tensorflow_nn.py
@@ -232,7 +232,7 @@ def __call__(self, x):
         return tf.nn.leaky_relu(x, alpha=self.alpha)


-def leaky_relu(x):
+def leaky_relu(x, alpha=0.2):
     """
     Compute the Leaky ReLU activation function.

@@ -247,7 +247,7 @@ def leaky_relu(x):
     The activation value.

     """
-    return tf.nn.leaky_relu(x)
+    return tf.nn.leaky_relu(x, alpha=alpha)


 class Softplus(object):
@@ -636,7 +636,7 @@ def moments(x, axes, shift=None, keepdims=False):
     ----------
     x : tensor
         A Tensor
-    axes : ints
+    axes : list or ints
         Axes along which to compute mean and variance.
     shift : int
         Not used in the current implementation.
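
For reference, a minimal doctest-style sketch of how the element-wise ops added in the hunks above compose into an activation, assuming the TensorFlow backend is active (the expected values are illustrative, not taken from the test suite):

>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.constant([-1.0, 3.0, 9.0])
>>> maximum = tl.ops.Maximum()
>>> minimum = tl.ops.Minimum()
>>> minimum(maximum(x, 0.2 * x), 6)  # LeakyReLU6 with alpha=0.2: [-0.2, 3.0, 6.0]
>>> tl.ops.ceil(tf.constant([0.3, 1.7]))  # [1.0, 2.0]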
diff --git a/tensorlayer/dataflow/mindspore_data.py b/tensorlayer/dataflow/mindspore_data.py index f42a1d4f9..fab126161 100644 --- a/tensorlayer/dataflow/mindspore_data.py +++ b/tensorlayer/dataflow/mindspore_data.py @@ -29,9 +29,6 @@ class Shuffle(str, Enum): FILES: str = "file" -print(Shuffle.GLOBAL) - - def Apply(dataset, transformation_func): return dataset.apply(transformation_func) diff --git a/tensorlayer/files/utils.py b/tensorlayer/files/utils.py index bc6d44d97..d05a0c3b0 100644 --- a/tensorlayer/files/utils.py +++ b/tensorlayer/files/utils.py @@ -2030,6 +2030,7 @@ def assign_weights(weights, network): if tl.BACKEND == 'tensorflow': for idx, param in enumerate(weights): ops.append(network.all_weights[idx].assign(param)) + elif tl.BACKEND == 'mindspore': class Assign_net(Cell): diff --git a/tensorlayer/layers/__init__.py b/tensorlayer/layers/__init__.py index 1a130b5c3..309d5861d 100644 --- a/tensorlayer/layers/__init__.py +++ b/tensorlayer/layers/__init__.py @@ -11,7 +11,7 @@ from .extend import * from .image_resampling import * from .inputs import * -# from .lambda_layers import * +from .lambda_layers import * from .merge import * from .noise import * from .normalization import * diff --git a/tensorlayer/layers/activation.py b/tensorlayer/layers/activation.py index 34788c672..c5a0de383 100644 --- a/tensorlayer/layers/activation.py +++ b/tensorlayer/layers/activation.py @@ -10,6 +10,13 @@ 'PRelu', 'PRelu6', 'PTRelu6', + 'LeakyReLU', + 'LeakyReLU6', + 'LeakyTwiceRelu6', + 'Ramp', + 'Swish', + 'HardTanh', + 'Mish' ] @@ -35,11 +42,6 @@ class PRelu(Module): ----------- >>> inputs = tl.layers.Input([10, 5]) >>> prelulayer = tl.layers.PRelu(channel_shared=True) - >>> print(prelulayer) - PRelu(channel_shared=True,in_channels=None,name=prelu) - >>> prelu = prelulayer(inputs) - >>> model = tl.models.Model(inputs=inputs, outputs=prelu) - >>> out = model(data, is_train=True) References ----------- @@ -96,9 +98,15 @@ def build(self, inputs_shape): self.sigmoid = tl.ops.Sigmoid() def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + pos = self.relu(inputs) - alpha_var_constrained = self.sigmoid(self.alpha_var) - neg = -alpha_var_constrained * self.relu(-inputs) + self.alpha_var_constrained = self.sigmoid(self.alpha_var) + neg = -self.alpha_var_constrained * self.relu(-inputs) return pos + neg @@ -195,6 +203,12 @@ def build(self, inputs_shape): # @tf.function def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + alpha_var_constrained = self.sigmoid(self.alpha_var) pos = self.relu(inputs) pos_6 = -self.relu(inputs - 6) @@ -247,6 +261,7 @@ def __init__( self, channel_shared=False, in_channels=None, + data_format='channels_last', a_init=truncated_normal(mean=0.0, stddev=0.05), name=None # "ptrelu6" ): @@ -254,6 +269,7 @@ def __init__( super(PTRelu6, self).__init__(name) self.channel_shared = channel_shared self.in_channels = in_channels + self.data_format = data_format self.a_init = a_init if self.channel_shared: @@ -297,6 +313,12 @@ def build(self, inputs_shape): # @tf.function def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + alpha_low_constrained = self.sigmoid(self.alpha_low) alpha_high_constrained = 
self.sigmoid(self.alpha_high)
         pos = self.relu(inputs)
@@ -304,3 +326,280 @@ def forward(self, inputs):
         pos_6 = -self.relu(inputs - 6) + alpha_high_constrained * self.relu(inputs - 6)
         neg = -alpha_low_constrained * self.relu(-inputs)

         return pos + pos_6 + neg
+
+
+class Ramp(Module):
+    """Ramp activation function.
+
+    Reference: [tf.clip_by_value]
+
+    Parameters
+    ----------
+    x : Tensor
+        input.
+    v_min : float
+        cap input to v_min as a lower bound.
+    v_max : float
+        cap input to v_max as an upper bound.
+
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
+
+    """
+
+    def __init__(self, v_min=0, v_max=1):
+        super(Ramp, self).__init__()
+        self._built = True
+        self.v_min = v_min
+        self.v_max = v_max
+
+    def forward(self, x):
+        return tl.ops.clip_by_value(x, clip_value_min=self.v_min, clip_value_max=self.v_max)
+
+
+class LeakyReLU(Module):
+    """
+
+    This function is a modified version of ReLU, introducing a nonzero gradient for negative input. Introduced by the paper:
+    `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__
+
+    The function returns the following results:
+      - When x < 0: ``f(x) = alpha_low * x``.
+      - When x >= 0: ``f(x) = x``.
+
+    Parameters
+    ----------
+    x : Tensor
+        Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
+    alpha : float
+        Slope.
+    name : str
+        The function name (optional).
+
+    Examples
+    --------
+    >>> import tensorlayer as tl
+    >>> net = tl.layers.Input([10, 200])
+    >>> net = tl.layers.LeakyReLU(alpha=0.5)(net)
+
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
+
+    References
+    ----------
+    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__
+
+    """
+
+    def __init__(self, alpha=0.2):
+        super(LeakyReLU, self).__init__()
+        self._built = True
+        self.alpha = alpha
+        self._leakyrelu = tl.ops.LeakyReLU(alpha=alpha)
+
+    def forward(self, x):
+        return self._leakyrelu(x)
+
+
+class LeakyReLU6(Module):
+    """
+    This activation function is a modified version of :func:`leaky_relu` introduced by the following paper:
+    `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__
+
+    This activation function also follows the behaviour of the activation function :func:`tf.ops.relu6` introduced by the following paper:
+    `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] `__
+
+    The function returns the following results:
+      - When x < 0: ``f(x) = alpha_low * x``.
+      - When x in [0, 6]: ``f(x) = x``.
+      - When x > 6: ``f(x) = 6``.
+
+    Parameters
+    ----------
+    x : Tensor
+        Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
+    alpha : float
+        Slope.
+    name : str
+        The function name (optional).
+
+    Examples
+    --------
+    >>> import tensorlayer as tl
+    >>> net = tl.layers.Input([10, 200])
+    >>> net = tl.layers.LeakyReLU6(alpha=0.5)(net)
+
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
+
+    References
+    ----------
+    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__
+    - `Convolutional Deep Belief Networks on CIFAR-10 [A.
Krizhevsky, 2010] `__
+    """
+
+    def __init__(self, alpha=0.2):
+        super(LeakyReLU6, self).__init__()
+        self._built = True
+        if not (0 < alpha <= 1):
+            raise ValueError("`alpha` value must be in [0, 1]`")
+
+        self.alpha = alpha
+        self.minimum = tl.ops.Minimum()
+        self.maximum = tl.ops.Maximum()
+
+    def forward(self, x):
+        return self.minimum(self.maximum(x, self.alpha * x), 6)
+
+
+class LeakyTwiceRelu6(Module):
+    """
+
+    This activation function is a modified version of :func:`leaky_relu` introduced by the following paper:
+    `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__
+
+    This activation function also follows the behaviour of the activation function :func:`tf.ops.relu6` introduced by the following paper:
+    `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] `__
+
+    This function pushes the logic further by adding `leaky` behaviour both below zero and above six.
+
+    The function returns the following results:
+      - When x < 0: ``f(x) = alpha_low * x``.
+      - When x in [0, 6]: ``f(x) = x``.
+      - When x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.
+
+    Parameters
+    ----------
+    x : Tensor
+        Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
+    alpha_low : float
+        Slope for x < 0: ``f(x) = alpha_low * x``.
+    alpha_high : float
+        Slope for x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.
+    name : str
+        The function name (optional).
+
+    Examples
+    --------
+    >>> import tensorlayer as tl
+    >>> net = tl.layers.Input([10, 200])
+    >>> net = tl.layers.LeakyTwiceRelu6(alpha_low=0.5, alpha_high=0.2)(net)
+
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
+
+    References
+    ----------
+    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__
+    - `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] `__
+
+    """
+
+    def __init__(self, alpha_low=0.2, alpha_high=0.2):
+        super(LeakyTwiceRelu6, self).__init__()
+        self._built = True
+        if not (0 < alpha_high <= 1):
+            raise ValueError("`alpha_high` value must be in [0, 1]`")
+
+        if not (0 < alpha_low <= 1):
+            raise ValueError("`alpha_low` value must be in [0, 1]`")
+
+        self.alpha_low = alpha_low
+        self.alpha_high = alpha_high
+        self.minimum = tl.ops.Minimum()
+        self.maximum = tl.ops.Maximum()
+
+    def forward(self, x):
+        x_is_above_0 = self.minimum(x, 6 * (1 - self.alpha_high) + self.alpha_high * x)
+        x_is_below_0 = self.minimum(self.alpha_low * x, 0)
+        return self.maximum(x_is_above_0, x_is_below_0)
+
+
+class Swish(Module):
+    """Swish function.
+
+    See `Swish: a Self-Gated Activation Function `__.
+
+    Parameters
+    ----------
+    x : Tensor
+        input.
+    name: str
+        function name (optional).
+
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
+
+    """
+
+    def __init__(self):
+        super(Swish, self).__init__()
+        self.sigmoid = tl.ops.Sigmoid()
+        self._built = True
+
+    def forward(self, x):
+        return self.sigmoid(x) * x
+
+
+class HardTanh(Module):
+    """Hard tanh activation function.
+
+    Which is a ramp function with low bound of -1 and upper bound of 1, shortcut is `htanh`.
+
+    Parameters
+    ----------
+    x : Tensor
+        input.
+    name : str
+        The function name (optional).
+
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
+
+    """
+
+    def __init__(self):
+        super(HardTanh, self).__init__()
+        self._built = True
+
+    def forward(self, x):
+        return tl.ops.clip_by_value(x, -1, 1)
+
+
+class Mish(Module):
+    """Mish activation function.
+
+    Reference: [Mish: A Self Regularized Non-Monotonic Neural Activation Function. Diganta Misra, 2019]
+
+    Parameters
+    ----------
+    x : Tensor
+        input.
+
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
+
+    """
+
+    def __init__(self):
+        super(Mish, self).__init__()
+        self._tanh = tl.ops.Tanh()
+        self._softplus = tl.ops.Softplus()
+        self._built = True
+
+    def forward(self, x):
+        return x * self._tanh(self._softplus(x))
diff --git a/tensorlayer/layers/convolution/__init__.py b/tensorlayer/layers/convolution/__init__.py
index ac7ff2f4d..4e1630ea1 100644
--- a/tensorlayer/layers/convolution/__init__.py
+++ b/tensorlayer/layers/convolution/__init__.py
@@ -10,7 +10,7 @@
 """

 # from .binary_conv import *
-# from .deformable_conv import *
+from .deformable_conv import *
 from .depthwise_conv import *
 # from .dorefa_conv import *
 # from .expert_conv import *
@@ -55,7 +55,7 @@
     # 'BinaryConv2d',

     # deformable
-    # 'DeformableConv2d',
+    'DeformableConv2d',

     # depthwise
     'DepthwiseConv2d',
diff --git a/tensorlayer/layers/convolution/deformable_conv.py b/tensorlayer/layers/convolution/deformable_conv.py
new file mode 100644
index 000000000..8a2cba09e
--- /dev/null
+++ b/tensorlayer/layers/convolution/deformable_conv.py
@@ -0,0 +1,324 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import tensorlayer as tl
+from tensorlayer import logging
+from tensorlayer.layers.core import Module
+
+__all__ = [
+    'DeformableConv2d',
+]
+
+
+class DeformableConv2d(Module):
+    """The :class:`DeformableConv2d` class is a 2D
+    `Deformable Convolutional Networks `__.
+
+    Parameters
+    ----------
+    offset_layer : tf.Tensor
+        To predict the offset of convolution operations.
+        The shape is (batchsize, input height, input width, 2*(number of elements in the convolution kernel))
+        e.g. if applying a 3*3 kernel, the number of the last dimension should be 18 (2*3*3)
+    n_filter : int
+        The number of filters.
+    filter_size : tuple of int
+        The filter size (height, width).
+    act : activation function
+        The activation function of this layer.
+    padding : str
+        The padding algorithm type: "SAME" or "VALID".
+    W_init : initializer
+        The initializer for the weight matrix.
+    b_init : initializer or None
+        The initializer for the bias vector. If None, skip biases.
+    in_channels : int
+        The number of in channels.
+    name : str
+        A unique layer name.
+
+    Examples
+    --------
+    With TensorLayer
+
+    >>> net = tl.layers.Input([5, 10, 10, 16], name='input')
+    >>> offset1 = tl.layers.Conv2d(
+    ...     n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', name='offset1'
+    ... )(net)
+    >>> deformconv1 = tl.layers.DeformableConv2d(
+    ...     offset_layer=offset1, n_filter=32, filter_size=(3, 3), name='deformable1'
+    ... )(net)
+    >>> offset2 = tl.layers.Conv2d(
+    ...     n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', name='offset2'
+    ... )(deformconv1)
+    >>> deformconv2 = tl.layers.DeformableConv2d(
+    ...     offset_layer=offset2, n_filter=64, filter_size=(3, 3), name='deformable2'
+    ... )(deformconv1)
+
+    References
+    ----------
+    - The deformation operation was adapted from the implementation in `here `__
+
+    Notes
+    -----
+    - The padding is fixed to 'SAME'.
+    - The current implementation is not optimized for memory usage. Please use it carefully.
+
+    """
+
+    def __init__(
+        self,
+        offset_layer=None,
+        n_filter=32,
+        filter_size=(3, 3),
+        act=None,
+        padding='SAME',
+        W_init=tl.initializers.truncated_normal(stddev=0.02),
+        b_init=tl.initializers.constant(value=0.0),
+        in_channels=None,
+        name=None
+    ):
+        super().__init__(name, act=act)
+
+        self.offset_layer = offset_layer
+        self.n_filter = n_filter
+        self.filter_size = filter_size
+        self.padding = padding
+        self.W_init = W_init
+        self.b_init = b_init
+        self.in_channels = in_channels
+
+        # layer forward state
+        self._forward_state = False
+
+        self.kernel_n = filter_size[0] * filter_size[1]
+        if self.offset_layer.get_shape()[-1] != 2 * self.kernel_n:
+            raise AssertionError("offset.get_shape()[-1] is not equal to: %d" % (2 * self.kernel_n))
+
+        if self.in_channels is not None:
+            self.build(None)
+            self._built = True
+
+        logging.info(
+            "DeformableConv2d %s: n_filter: %d, filter_size: %s act: %s" % (
+                self.name, self.n_filter, str(self.filter_size),
+                self.act.__class__.__name__ if self.act is not None else 'No Activation'
+            )
+        )
+
+    def __repr__(self):
+        actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
+        s = (
+            '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}'
+            ', padding={padding}'
+        )
+        if self.b_init is None:
+            s += ', bias=False'
+        s += (', ' + actstr)
+        if self.name is not None:
+            s += ', name=\'{name}\''
+        s += ')'
+        return s.format(classname=self.__class__.__name__, **self.__dict__)
+
+    def build(self, inputs_shape):
+        if self.in_channels is None:
+            self.in_channels = inputs_shape[-1]
+
+        self.input_h = int(inputs_shape[1])
+        self.input_w = int(inputs_shape[2])
+        initial_offsets = tl.ops.stack(
+            tl.ops.meshgrid(tl.ops.range(self.filter_size[0]), tl.ops.range(self.filter_size[1]), indexing='ij')
+        )  # initial_offsets --> (kh, kw, 2)
+        initial_offsets = tl.ops.reshape(initial_offsets, (-1, 2))  # initial_offsets --> (n, 2)
+        initial_offsets = tl.ops.expand_dims(initial_offsets, 0)  # initial_offsets --> (1, n, 2)
+        initial_offsets = tl.ops.expand_dims(initial_offsets, 0)  # initial_offsets --> (1, 1, n, 2)
+        initial_offsets = tl.ops.tile(
+            initial_offsets, [self.input_h, self.input_w, 1, 1]
+        )  # initial_offsets --> (h, w, n, 2)
+        initial_offsets = tl.ops.cast(initial_offsets, 'float32')
+        grid = tl.ops.meshgrid(
+            tl.ops.range(
+                -int((self.filter_size[0] - 1) / 2.0), int(self.input_h - int((self.filter_size[0] - 1) / 2.0)), 1
+            ),
+            tl.ops.range(
+                -int((self.filter_size[1] - 1) / 2.0), int(self.input_w - int((self.filter_size[1] - 1) / 2.0)), 1
+            ), indexing='ij'
+        )
+
+        grid = tl.ops.stack(grid, axis=-1)
+        grid = tl.ops.cast(grid, 'float32')  # grid --> (h, w, 2)
+        grid = tl.ops.expand_dims(grid, 2)  # grid --> (h, w, 1, 2)
+        grid = tl.ops.tile(grid, [1, 1, self.kernel_n, 1])  # grid --> (h, w, n, 2)
+        self.grid_offset = grid + initial_offsets  # grid_offset --> (h, w, n, 2)
+
+        self.filter_shape = (1, 1, self.kernel_n, self.in_channels, self.n_filter)
+
+        self.W = self._get_weights("W_deformableconv2d", shape=self.filter_shape, init=self.W_init)
+
+        if self.b_init:
+            self.b = self._get_weights("b_deformableconv2d", shape=(self.n_filter, ), init=self.b_init)
+
+        self.conv3d = tl.ops.Conv3D(strides=[1, 1, 1, 1, 1], padding='VALID')
+        self.bias_add = tl.ops.BiasAdd()
+
+    def forward(self, inputs):
+        if self._forward_state == False:
+            if self._built == False:
+                self.build(tl.get_tensor_shape(inputs))
+                self._built = True
+            self._forward_state = True
+
+        offset = self.offset_layer
+        grid_offset = 
self.grid_offset + + input_deform = self._tf_batch_map_offsets(inputs, offset, grid_offset) + outputs = self.conv3d(input=input_deform, filters=self.W) + outputs = tl.ops.reshape(tensor=outputs, shape=[outputs.get_shape()[0], self.input_h, self.input_w, self.n_filter]) + if self.b_init: + outputs = self.bias_add(outputs, self.b) + if self.act: + outputs = self.act(outputs) + return outputs + + def _to_bc_h_w(self, x, x_shape): + """(b, h, w, c) -> (b*c, h, w)""" + x = tl.ops.transpose(a=x, perm=[0, 3, 1, 2]) + x = tl.ops.reshape(x, (-1, x_shape[1], x_shape[2])) + return x + + def _to_b_h_w_n_c(self, x, x_shape): + """(b*c, h, w, n) -> (b, h, w, n, c)""" + x = tl.ops.reshape(x, (-1, x_shape[4], x_shape[1], x_shape[2], x_shape[3])) + x = tl.ops.transpose(a=x, perm=[0, 2, 3, 4, 1]) + return x + + def tf_flatten(self, a): + """Flatten tensor""" + return tl.ops.reshape(a, [-1]) + + def _get_vals_by_coords(self, inputs, coords, idx, out_shape): + indices = tl.ops.stack( + [idx, self.tf_flatten(coords[:, :, :, :, 0]), + self.tf_flatten(coords[:, :, :, :, 1])], axis=-1 + ) + vals = tl.ops.gather_nd(inputs, indices) + vals = tl.ops.reshape(vals, out_shape) + return vals + + def _tf_repeat(self, a, repeats): + """Tensorflow version of np.repeat for 1D""" + # https://github.com/tensorflow/tensorflow/issues/8521 + + if len(a.get_shape()) != 1: + raise AssertionError("This is not a 1D Tensor") + + a = tl.ops.expand_dims(a, -1) + a = tl.ops.tile(a, [1, repeats]) + a = self.tf_flatten(a) + return a + + def _tf_batch_map_coordinates(self, inputs, coords): + """Batch version of tf_map_coordinates + + Only supports 2D feature maps + + Parameters + ---------- + inputs : ``tf.Tensor`` + shape = (b*c, h, w) + coords : ``tf.Tensor`` + shape = (b*c, h, w, n, 2) + + Returns + ------- + ``tf.Tensor`` + A Tensor with the shape as (b*c, h, w, n) + + """ + inputs_shape = inputs.get_shape() + coords_shape = coords.get_shape() + batch_channel = tl.get_tensor_shape(inputs)[0] + input_h = int(inputs_shape[1]) + input_w = int(inputs_shape[2]) + kernel_n = int(coords_shape[3]) + n_coords = input_h * input_w * kernel_n + + coords_lt = tl.ops.cast(tl.ops.floor(coords), 'int32') + coords_rb = tl.ops.cast(tl.ops.ceil(coords), 'int32') + coords_lb = tl.ops.stack([coords_lt[:, :, :, :, 0], coords_rb[:, :, :, :, 1]], axis=-1) + coords_rt = tl.ops.stack([coords_rb[:, :, :, :, 0], coords_lt[:, :, :, :, 1]], axis=-1) + + idx = self._tf_repeat(tl.ops.range(batch_channel), n_coords) + + vals_lt = self._get_vals_by_coords(inputs, coords_lt, idx, (batch_channel, input_h, input_w, kernel_n)) + vals_rb = self._get_vals_by_coords(inputs, coords_rb, idx, (batch_channel, input_h, input_w, kernel_n)) + vals_lb = self._get_vals_by_coords(inputs, coords_lb, idx, (batch_channel, input_h, input_w, kernel_n)) + vals_rt = self._get_vals_by_coords(inputs, coords_rt, idx, (batch_channel, input_h, input_w, kernel_n)) + + coords_offset_lt = coords - tl.ops.cast(coords_lt, 'float32') + + vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[:, :, :, :, 0] + vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[:, :, :, :, 0] + mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[:, :, :, :, 1] + + return mapped_vals + + def _tf_batch_map_offsets(self, inputs, offsets, grid_offset): + """Batch map offsets into input + + Parameters + ------------ + inputs : ``tf.Tensor`` + shape = (b, h, w, c) + offsets: ``tf.Tensor`` + shape = (b, h, w, 2*n) + grid_offset: `tf.Tensor`` + Offset grids shape = (h, w, n, 2) + + Returns + ------- + 
``tf.Tensor`` + A Tensor with the shape as (b, h, w, n, c) + + """ + inputs_shape = inputs.get_shape() + batch_size = tl.get_tensor_shape(inputs)[0] + kernel_n = int(int(offsets.get_shape()[3]) / 2) + input_h = inputs_shape[1] + input_w = inputs_shape[2] + channel = inputs_shape[3] + + # inputs (b, h, w, c) --> (b*c, h, w) + inputs = self._to_bc_h_w(inputs, inputs_shape) + + # offsets (b, h, w, 2*n) --> (b, h, w, n, 2) + offsets = tl.ops.reshape(offsets, (batch_size, input_h, input_w, kernel_n, 2)) + # offsets (b, h, w, n, 2) --> (b*c, h, w, n, 2) + # offsets = tf.tile(offsets, [channel, 1, 1, 1, 1]) + + coords = tl.ops.expand_dims(grid_offset, 0) # grid_offset --> (1, h, w, n, 2) + coords = tl.ops.tile(coords, [batch_size, 1, 1, 1, 1]) + offsets # grid_offset --> (b, h, w, n, 2) + + # clip out-of-bound coordinates + coords = tl.ops.stack( + [ + tl.ops.clip_by_value(coords[:, :, :, :, 0], 0.0, tl.ops.cast(input_h - 1, 'float32')), + tl.ops.clip_by_value(coords[:, :, :, :, 1], 0.0, tl.ops.cast(input_w - 1, 'float32')) + ], axis=-1 + ) + coords = tl.ops.tile(coords, [channel, 1, 1, 1, 1]) + + mapped_vals = self._tf_batch_map_coordinates(inputs, coords) + # (b*c, h, w, n) --> (b, h, w, n, c) + mapped_vals = self._to_b_h_w_n_c(mapped_vals, [batch_size, input_h, input_w, kernel_n, channel]) + + return mapped_vals + +if __name__ == '__main__': + net = tl.layers.Input([5, 10, 10, 16], name='input') + offset1 = tl.layers.Conv2d(n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', name='offset1', in_channels=16)(net) + deformconv1 = DeformableConv2d(offset_layer=offset1, n_filter=32, filter_size=(3, 3), name='deformable1')(net) + offset2 = tl.layers.Conv2d(n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', name='offset2', in_channels=32)(deformconv1) + deformconv2 = DeformableConv2d(offset_layer=offset2, n_filter=64, filter_size=(3, 3), name='deformable2')(deformconv1) + print(deformconv2) + diff --git a/tensorlayer/layers/convolution/depthwise_conv.py b/tensorlayer/layers/convolution/depthwise_conv.py index 1cc90e1bb..bac18dec7 100644 --- a/tensorlayer/layers/convolution/depthwise_conv.py +++ b/tensorlayer/layers/convolution/depthwise_conv.py @@ -156,6 +156,12 @@ def build(self, inputs_shape): self.act_init_flag = True def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + outputs = self.depthwise_conv2d(input=inputs, filter=self.W) if self.b_init_flag: outputs = self.bias_add(outputs, self.b) diff --git a/tensorlayer/layers/convolution/simplified_conv.py b/tensorlayer/layers/convolution/simplified_conv.py index ecee3ee6e..161034f51 100644 --- a/tensorlayer/layers/convolution/simplified_conv.py +++ b/tensorlayer/layers/convolution/simplified_conv.py @@ -138,15 +138,20 @@ def build(self, inputs_shape): self.act_init_flag = False if self.act: - self.activate = self.act self.act_init_flag = True def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + outputs = self.conv1d(inputs, self.W) if self.b_init_flag: outputs = tl.ops.bias_add(outputs, self.b, data_format=self.data_format) if self.act_init_flag: - outputs = self.activate(outputs) + outputs = self.act(outputs) if tl.BACKEND == 'mindspore' and self.data_format == 'NWC': outputs = tl.nchw_to_nhwc(outputs) @@ -282,6 +287,12 @@ def build(self, inputs_shape): self.act_init_flag = True
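Each of these convolution hunks adds the same build-on-first-forward guard, so a layer created without in_channels can infer its weight shapes from the first batch it sees. A condensed sketch of the idiom (illustrative only; LazyLayer is a made-up name):

class LazyLayer:
    def __init__(self):
        self._built = False          # weights not created yet
        self._forward_state = False  # first-forward guard

    def build(self, inputs_shape):
        # infer shapes from the first input, then create weights
        self.in_channels = inputs_shape[-1]

    def forward(self, inputs):
        if self._forward_state == False:
            if self._built == False:
                self.build(inputs.shape)
                self._built = True
            self._forward_state = True
        return inputs  # a real layer applies its conv/matmul here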
def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + outputs = self.conv2d(inputs, self.W) if self.b_init_flag: outputs = self.bias_add(outputs, self.b) @@ -423,15 +434,20 @@ def build(self, inputs_shape): self.act_init_flag = False if self.act: - self.activate = self.act() self.act_init_flag = True def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + outputs = self.conv3d(inputs, self.W) if self.b_init_flag: outputs = tl.ops.bias_add(outputs, self.b, data_format=self.data_format) if self.act_init_flag: - outputs = self.activate(outputs) + outputs = self.act(outputs) return outputs @@ -564,15 +580,20 @@ def build(self, inputs_shape): self.act_init_flag = False if self.act: - self.activate = self.act self.act_init_flag = True def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + outputs = self.conv1d_transpose(inputs, self.W) if self.b_init_flag: outputs = self.bias_add(outputs, self.b) if self.act_init_flag: - outputs = self.activate(outputs) + outputs = self.act(outputs) if tl.BACKEND == 'mindspore' and self.data_format == 'NWC': outputs = tl.nchw_to_nhwc(outputs) return outputs @@ -710,6 +731,12 @@ def build(self, inputs_shape): self.act_init_flag = True def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + outputs = self.conv2d_transpose(inputs, self.W) if self.b_init_flag: outputs = self.bias_add(outputs, self.b) @@ -726,7 +753,7 @@ class DeConv3d(Module): Parameters ---------- n_filter : int The number of filters. filter_size : tuple of int @@ -855,15 +882,20 @@ def build(self, inputs_shape): self.act_init_flag = False if self.act: - self.activate = self.act() self.act_init_flag = True def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + outputs = self.conv3d_transpose(inputs, self.W) if self.b_init_flag: outputs = self.bias_add(outputs, self.b) if self.act_init_flag: - outputs = self.activate(outputs) + outputs = self.act(outputs) if tl.BACKEND == 'mindspore' and self.data_format == 'NDHWC': outputs = tl.nchw_to_nhwc(outputs) return outputs diff --git a/tensorlayer/layers/core/common.py b/tensorlayer/layers/core/common.py index 9a67e20c7..1af257f12 100644 --- a/tensorlayer/layers/core/common.py +++ b/tensorlayer/layers/core/common.py @@ -1,7 +1,10 @@ #! 
/usr/bin/python # -*- coding: utf-8 -*- +import os import tensorlayer as tl +from tensorlayer.files import utils +from tensorlayer import logging _act_dict = { "relu": tl.ops.ReLU, @@ -11,6 +14,7 @@ "softplus": tl.ops.Softplus, "tanh": tl.ops.Tanh, "sigmoid": tl.ops.Sigmoid, + "softmax": tl.ops.Softmax } @@ -32,3 +36,139 @@ def str2act(act): if act not in _act_dict.keys(): raise Exception("Unsupported act: {}".format(act)) return _act_dict[act] + +def _save_weights(self, file_path, format=None): + """Input file_path, save model weights into a file of given format. + Use self.load_weights() to restore. + + Parameters + ---------- + file_path : str + Filename to which the model weights will be saved. + format : str or None + Saved file format. + Value should be None, 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now. + 1) If this is set to None, then the postfix of file_path will be used to decide saved format. + If the postfix is not in ['h5', 'hdf5', 'npz', 'ckpt'], then file will be saved in hdf5 format by default. + 2) 'hdf5' will save model weights name in a list and each layer has its weights stored in a group of + the hdf5 file. + 3) 'npz' will save model weights sequentially into a npz file. + 4) 'npz_dict' will save model weights along with its name as a dict into a npz file. + 5) 'ckpt' will save model weights into a tensorflow ckpt file. + + Default None. + + Examples + -------- + 1) Save model weights in hdf5 format by default. + >>> net = vgg16() + >>> net.save_weights('./model.h5') + ... + >>> net.load_weights('./model.h5') + + 2) Save model weights in npz/npz_dict format + >>> net = vgg16() + >>> net.save_weights('./model.npz') + >>> net.save_weights('./model.npz', format='npz_dict') + + """ + + # self.all_weights = self.network.all_weights + if self.all_weights is None or len(self.all_weights) == 0: + logging.warning("Model contains no weights or layers haven't been built, nothing will be saved") + return + + if format is None: + postfix = file_path.split('.')[-1] + if postfix in ['h5', 'hdf5', 'npz', 'ckpt']: + format = postfix + else: + format = 'hdf5' + + if format == 'hdf5' or format == 'h5': + utils.save_weights_to_hdf5(file_path, self) + elif format == 'npz': + utils.save_npz(self.all_weights, file_path) + elif format == 'npz_dict': + utils.save_npz_dict(self.all_weights, file_path) + elif format == 'ckpt': + # TODO: enable this when tf save ckpt is enabled + raise NotImplementedError("ckpt load/save is not supported now.") + else: + raise ValueError( + "Save format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'." + "Other format is not supported now." + ) + +def _load_weights(self, file_path, format=None, in_order=True, skip=False): + """Load model weights from a given file, which should be previously saved by self.save_weights(). + + Parameters + ---------- + file_path : str + Filename from which the model weights will be loaded. + format : str or None + If not specified (None), the postfix of the file_path will be used to decide its format. If specified, + value should be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now. + In addition, it should be the same format when you saved the file using self.save_weights(). + Default is None. + in_order : bool + Allow loading weights into model in a sequential way or by name. Only useful when 'format' is 'hdf5'. + If 'in_order' is True, weights from the file will be loaded into model in a sequential way. 
+ If 'in_order' is False, weights from the file will be loaded into model by matching the name + with the weights of the model, particularly useful when trying to restore model in eager(graph) mode from + a weights file which is saved in graph(eager) mode. + Default is True. + skip : bool + Allow skipping weights whose name is mismatched between the file and model. Only useful when 'format' is + 'hdf5' or 'npz_dict'. If 'skip' is True, 'in_order' argument will be ignored and those loaded weights + whose name is not found in model weights (self.all_weights) will be skipped. If 'skip' is False, error will + occur when mismatch is found. + Default is False. + + Examples + -------- + 1) load model from a hdf5 file. + >>> net = vgg16() + >>> net.load_weights('./model_graph.h5', in_order=False, skip=True) # load weights by name, skipping mismatch + >>> net.load_weights('./model_eager.h5') # load sequentially + + 2) load model from a npz file + >>> net.load_weights('./model.npz') + + 3) load model from a npz file, which is saved as npz_dict previously + >>> net.load_weights('./model.npz', format='npz_dict') + + Notes + ------- + 1) 'in_order' is only useful when 'format' is 'hdf5'. If you are trying to load a weights file which is + saved in a different mode, it is recommended to set 'in_order' to True. + 2) 'skip' is useful when 'format' is 'hdf5' or 'npz_dict'. If 'skip' is True, + 'in_order' argument will be ignored. + + """ + if not os.path.exists(file_path): + raise FileNotFoundError("file {} doesn't exist.".format(file_path)) + + if format is None: + format = file_path.split('.')[-1] + + if format == 'hdf5' or format == 'h5': + if skip or not in_order: + # load by weights name + utils.load_hdf5_to_weights(file_path, self, skip) + else: + # load in order + utils.load_hdf5_to_weights_in_order(file_path, self) + elif format == 'npz': + utils.load_and_assign_npz(file_path, self) + elif format == 'npz_dict': + utils.load_and_assign_npz_dict(file_path, self, skip) + elif format == 'ckpt': + # TODO: enable this when tf save ckpt is enabled + raise NotImplementedError("ckpt load/save is not supported now.") + else: + raise ValueError( + "File format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. " + "Other format is not supported now." + ) diff --git a/tensorlayer/layers/core/core_mindspore.py b/tensorlayer/layers/core/core_mindspore.py index 0ff40f1ce..726780cf4 100644 --- a/tensorlayer/layers/core/core_mindspore.py +++ b/tensorlayer/layers/core/core_mindspore.py @@ -1,13 +1,13 @@ #! /usr/bin/python # -*- coding: utf-8 -*- -from .common import str2act +from .common import str2act, _save_weights, _load_weights from mindspore.nn import Cell -import os import tensorlayer as tl -from tensorlayer.files import utils from tensorlayer.layers.utils import (get_variable_with_initializer) -from tensorlayer import logging +from collections import OrderedDict + +__all__ = ['Module', 'SequentialLayer', 'LayerList'] _global_layer_name_dict = {} # TODO: better implementation? 
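The _save_weights/_load_weights helpers moved into core/common.py above are shared by the per-backend Module classes below, so checkpointing behaves the same under TensorFlow, MindSpore and Dragon. A short usage sketch (net stands for any built Module; illustrative only):

net.save_weights('./model.h5')                      # format inferred from the postfix -> hdf5
net.save_weights('./model.npz', format='npz_dict')  # save a name -> value dict into a npz file
net.load_weights('./model.h5', in_order=False, skip=True)  # match by name, skip mismatches
net.load_weights('./model.npz', format='npz_dict')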
@@ -39,16 +39,22 @@ def __init__(self, name=None, act=None, *args, **kwargs): _global_layer_name_dict[name] = 0 self.name = name + if isinstance(act, str): - self.act = str2act(act) - else: - if act: - self.act = act() + str_act = str2act(act) + + if act: + if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or len(act) > 10 and act[0:10] == "leaky_relu"): + self.act = str_act + elif isinstance(act, str): + self.act = str_act() else: - self.act = act + self.act = act() + else: + self.act = act # Layer building state - self._built = False + # self._built = False # Layer nodes state self._nodes = [] @@ -80,140 +86,12 @@ def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), tr return weight def save_weights(self, file_path, format=None): - """Input file_path, save model weights into a file of given format. - Use self.load_weights() to restore. - - Parameters - ---------- - file_path : str - Filename to which the model weights will be saved. - format : str or None - Saved file format. - Value should be None, 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now. - 1) If this is set to None, then the postfix of file_path will be used to decide saved format. - If the postfix is not in ['h5', 'hdf5', 'npz', 'ckpt'], then file will be saved in hdf5 format by default. - 2) 'hdf5' will save model weights name in a list and each layer has its weights stored in a group of - the hdf5 file. - 3) 'npz' will save model weights sequentially into a npz file. - 4) 'npz_dict' will save model weights along with its name as a dict into a npz file. - 5) 'ckpt' will save model weights into a tensorflow ckpt file. - - Default None. - - Examples - -------- - 1) Save model weights in hdf5 format by default. - >>> net = vgg16() - >>> net.save_weights('./model.h5') - ... - >>> net.load_weights('./model.h5') - - 2) Save model weights in npz/npz_dict format - >>> net = vgg16() - >>> net.save_weights('./model.npz') - >>> net.save_weights('./model.npz', format='npz_dict') - - """ - - # self.all_weights = self.network.all_weights - if self.all_weights is None or len(self.all_weights) == 0: - logging.warning("Model contains no weights or layers haven't been built, nothing will be saved") - return - - if format is None: - postfix = file_path.split('.')[-1] - if postfix in ['h5', 'hdf5', 'npz', 'ckpt']: - format = postfix - else: - format = 'hdf5' - - if format == 'hdf5' or format == 'h5': - utils.save_weights_to_hdf5(file_path, self) - elif format == 'npz': - utils.save_npz(self.all_weights, file_path) - elif format == 'npz_dict': - utils.save_npz_dict(self.all_weights, file_path) - elif format == 'ckpt': - # TODO: enable this when tf save ckpt is enabled - raise NotImplementedError("ckpt load/save is not supported now.") - else: - raise ValueError( - "Save format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'." - "Other format is not supported now." - ) + """Input file_path, save model weights into a file of given format.""" + _save_weights(self, file_path, format) def load_weights(self, file_path, format=None, in_order=True, skip=False): - """Load model weights from a given file, which should be previously saved by self.save_weights(). - - Parameters - ---------- - file_path : str - Filename from which the model weights will be loaded. - format : str or None - If not specified (None), the postfix of the file_path will be used to decide its format. If specified, - value should be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now. 
- In addition, it should be the same format when you saved the file using self.save_weights(). - Default is None. - in_order : bool - Allow loading weights into model in a sequential way or by name. Only useful when 'format' is 'hdf5'. - If 'in_order' is True, weights from the file will be loaded into model in a sequential way. - If 'in_order' is False, weights from the file will be loaded into model by matching the name - with the weights of the model, particularly useful when trying to restore model in eager(graph) mode from - a weights file which is saved in graph(eager) mode. - Default is True. - skip : bool - Allow skipping weights whose name is mismatched between the file and model. Only useful when 'format' is - 'hdf5' or 'npz_dict'. If 'skip' is True, 'in_order' argument will be ignored and those loaded weights - whose name is not found in model weights (self.all_weights) will be skipped. If 'skip' is False, error will - occur when mismatch is found. - Default is False. - - Examples - -------- - 1) load model from a hdf5 file. - >>> net = vgg16() - >>> net.load_weights('./model_graph.h5', in_order=False, skip=True) # load weights by name, skipping mismatch - >>> net.load_weights('./model_eager.h5') # load sequentially - - 2) load model from a npz file - >>> net.load_weights('./model.npz') - - 2) load model from a npz file, which is saved as npz_dict previously - >>> net.load_weights('./model.npz', format='npz_dict') - - Notes - ------- - 1) 'in_order' is only useful when 'format' is 'hdf5'. If you are trying to load a weights file which is - saved in a different mode, it is recommended to set 'in_order' be True. - 2) 'skip' is useful when 'format' is 'hdf5' or 'npz_dict'. If 'skip' is True, - 'in_order' argument will be ignored. - - """ - if not os.path.exists(file_path): - raise FileNotFoundError("file {} doesn't exist.".format(file_path)) - - if format is None: - format = file_path.split('.')[-1] - - if format == 'hdf5' or format == 'h5': - if skip ==True or in_order == False: - # load by weights name - utils.load_hdf5_to_weights(file_path, self, skip) - else: - # load in order - utils.load_hdf5_to_weights_in_order(file_path, self) - elif format == 'npz': - utils.load_and_assign_npz(file_path, self) - elif format == 'npz_dict': - utils.load_and_assign_npz_dict(file_path, self, skip) - elif format == 'ckpt': - # TODO: enable this when tf save ckpt is enabled - raise NotImplementedError("ckpt load/save is not supported now.") - else: - raise ValueError( - "File format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. " - "Other format is not supported now." - ) + """Load model weights from a given file, which should be previously saved by self.save_weights().""" + _load_weights(self, file_path, format, in_order, skip) @staticmethod def _compute_shape(tensors): @@ -252,7 +130,7 @@ def set_train(self): self.add_flags_recursive(training=True) return self - def eval(self): + def set_eval(self): """Set this network in evaluation mode. After calling this method, all layers in network are in evaluation mode, in particular, BatchNorm, Dropout, etc. @@ -268,6 +146,14 @@ def eval(self): self.add_flags_recursive(training=False) return self + def test(self): + """Set this network in evaluation mode.""" + self.eval() + + def infer(self): + """Set this network in evaluation mode.""" + self.eval() + @property def trainable_weights(self): """ @@ -281,7 +167,8 @@ def trainable_weights(self): Returns: List, the list of trainable weights. 
""" - return list(filter(lambda x: x.requires_grad, self.get_parameters(expand=True))) + self._trainable_weights = list(filter(lambda x: x.requires_grad, self.get_parameters(expand=True))) + return self._trainable_weights @property def nontrainable_weights(self): @@ -304,76 +191,161 @@ def all_weights(self): + list(filter(lambda x: not x.requires_grad, self.get_parameters(expand=True))) -class LayerNode(object): +class SequentialLayer(Module): + """ + Sequential layer container. + + A list of Layers will be added to it in the order they are passed in the constructor. + Alternatively, an ordered dict of layers can also be passed in. + + Args: + args (list, OrderedDict): List of subclass of Module. + + Raises: + TypeError: If the type of the argument is not list or OrderedDict. + + Inputs: + - **input** (Tensor) - Tensor with shape according to the first Module in the sequence. + + Outputs: + Tensor, the output Tensor with shape depending on the input and defined sequence of Layers. + + Examples: + >>> conv = tl.layers.Conv2d(3, 2, 3, pad_mode='valid') + >>> bn = tl.layers.BatchNorm2d(2) + >>> relu = tl.ReLU() + >>> seq = tl.layers.SequentialLayer([conv, bn, relu]) + >>> + >>> x = tl.layers.Input((1, 3, 4, 4)) + >>> seq(x) + """ + def __init__(self, *args): + super(SequentialLayer, self).__init__() + # self._built = True + if len(args) == 1: + layers = args[0] + if isinstance(layers, list): + for index, layer in enumerate(layers): + self.insert_child_to_layer(str(index), layer) + elif isinstance(layers, OrderedDict): + for name, layer in layers.items(): + self.insert_child_to_layer(name, layer) + else: + raise TypeError('Layers must be list or orderedDict') + else: + for index, layer in enumerate(args): + self.insert_child_to_layer(str(index), layer) + self.layer_list = list(self._layers.values()) + + def __getitem__(self, index): + if isinstance(index, slice): + return self.__class__( + OrderedDict(list(self._layers.items())[index])) + index = self._valid_index(len(self), index) + return list(self._layers.values())[index] + + def __setitem__(self, index, layer): + if self._valid_module(layer): + index = self._valid_index(len(self), index) + key = list(self._layers.keys())[index] + self._layers[key] = layer + self.layer_list = list(self._layers.values()) + + def __delitem__(self, index): + if isinstance(index, int): + index = self._valid_index(len(self), index) + key = list(self._layers.keys())[index] + del self._layers[key] + elif isinstance(index, slice): + keys = list(self._layers.keys())[index] + for key in keys: + del self._layers[key] + else: + raise TypeError('Index {} is not int type or slice type'.format(index)) + self.layer_list = list(self._layers.values()) + + def __len__(self): + return len(self._layers) + + def set_grad(self, flag=True): + self.requires_grad = flag + for layer in self._layers.values(): + layer.set_grad(flag) + + def append(self, layer): + if self._valid_module(layer): + self._layers[str(len(self))] = layer + self.layer_list = list(self._layers.values()) + return self + + def build(self, inputs_shape): + pass + + def forward(self, input_data): + for layer in self.layer_list: + input_data = layer(input_data) + return input_data + + def _valid_index(self, layer_num, index): + if not isinstance(index, int): + raise TypeError("Index {} is not int type") + if not -layer_num <= index < layer_num: + raise IndexError("Index should be a number in range [{}, {}), but got {}" + .format(-layer_num, layer_num, index)) + return index % layer_num + + def _valid_module(self, 
layer): + if issubclass(layer.__class__, Module): + return True + raise TypeError('Module {} is not subclass of Module'.format(layer)) + + +class LayerList(Module): """ - The class :class:`LayerNode` class represents a conceptional node for a layer. + The class :class:`LayerList` is a linear stack of layers. - LayerNode is used for building static model and it is actually a light weighted - wrapper over Layer. Specifically, it is used for building static computational graph - (see _construct_graph() in tl.models.Model). In static model, each layer relates to - one or more LayerNode, and the connection relationship between layers is built upon - LayerNode. In addition, LayerNode eases layer reuse and weights sharing. + The :class:`LayerList` can be created by passing a list of layer instances. + The given layer instances will be automatically connected one by one. Parameters ---------- - layer : tl.layers.Layer - A tl layer that wants to create a node. - node_index : int - Index of this node in layer._nodes. - in_nodes :a list of LayerNode - Father nodes to this node. - in_tensors : a list of tensors - Input tensors to this node. - out_tensors : a list of tensors - Output tensors to this node. - in_tensor_idxes : a list of int - Indexes of each input tensor in its corresponding node's out_tensors. + layers: list of Layer + A list of layers. + name : str or None + A unique layer name. If None, a unique name will be automatically assigned. Methods --------- __init__() - Initializing the LayerNode. - __call__() - (1) Forwarding through the layer. (2) Update its input/output tensors. + Initializing the LayerList. + weights() + A collection of weights of all the layer instances. + build() + Build the LayerList. The layer instances will be connected automatically one by one. + forward() + Forward the computation. The computation will go through all layer instances. """ - def __init__(self, layer, node_index, in_nodes, in_tensors, out_tensors, in_tensor_idxes): + def __init__(self, layers, name=None): """ + Initializing the LayerList given a list of Layer. - Parameters - ---------- - layer - node_index - in_nodes - in_tensors - out_tensors - in_tensor_idxes + :param layers: list of Layer + :param name: str or None """ - self.layer = layer - self.node_index = node_index - self.in_nodes = in_nodes - self.out_nodes = [] - self.in_tensors = in_tensors - self.out_tensors = out_tensors - self.name = layer.name + "_node_{}".format(node_index) - - self.in_tensors_idxes = in_tensor_idxes - - self.visited = False - - def __call__(self, inputs, **kwargs): - """(1) Forwarding through the layer. (2) Update its input/output tensors.""" - outputs = self.layer.forward(inputs, **kwargs) - self.in_tensors = tolist(inputs) - self.out_tensors = tolist(outputs) - return self.out_tensors - - -def tolist(tensors): - if isinstance(tensors, list) or isinstance(tensors, tuple): - ntensors = list() - for t in tensors: - ntensors += tolist(t) - return ntensors - else: - return [tensors] + + super(LayerList, self).__init__(name=name) + pass + + def __getitem__(self, idx): + pass + + def __len__(self): + return len(self.layers) + + def __repr__(self): + pass + + def forward(self, inputs): + pass + diff --git a/tensorlayer/layers/core/core_tensorflow_dragon.py b/tensorlayer/layers/core/core_tensorflow_dragon.py index 4078c87df..66ca67472 100644 --- a/tensorlayer/layers/core/core_tensorflow_dragon.py +++ b/tensorlayer/layers/core/core_tensorflow_dragon.py @@ -1,15 +1,15 @@ #! 
/usr/bin/python # -*- coding: utf-8 -*- -from .common import str2act +from .common import str2act, _save_weights, _load_weights from tensorlayer.backend.ops.load_backend import BACKEND from collections import OrderedDict -import time, os +import time import tensorlayer as tl -from tensorlayer.decorators import (protected_method) -from tensorlayer.files import utils from tensorlayer.layers.utils import (get_variable_with_initializer) from tensorlayer import logging +__all__ = ['Module', 'SequentialLayer', 'LayerList'] + _global_layer_name_dict = {} # TODO: better implementation? if BACKEND == 'tensorflow': @@ -53,13 +53,19 @@ def __init__(self, name=None, act=None, *args, **kwargs): _global_layer_name_dict[name] = 0 self.name = name + if isinstance(act, str): - self.act = str2act(act) - else: - if act: - self.act = act() + str_act = str2act(act) + + if act: + if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or len(act) > 10 and act[0:10] == "leaky_relu"): + self.act = str_act + elif isinstance(act, str): + self.act = str_act() else: - self.act = act + self.act = act() + else: + self.act = act # Layer building state self._built = False @@ -73,17 +79,17 @@ def __init__(self, name=None, act=None, *args, **kwargs): self._trainable_weights = [] self._nontrainable_weights = [] - # nested layers - # self._layers = None + # layer forward state + self._forward_state = False # Layer training state self.is_train = True def extend_repr(self): """ - Sets the extended representation of the Cell. + Sets the extended representation of the Module. - To print customized extended information, re-implement this method in your own cells. + To print customized extended information, re-implement this method in your own Layers. """ return '' @@ -124,20 +130,23 @@ def __setattr__(self, name, value): del self.__dict__[name] if params and name in params: raise TypeError("Expected type is Parameter, but got Module.") - if value._built is False: - raise AttributeError( - "The registered layer `{}` should be built in advance. " - "Do you forget to pass the keyword argument 'in_channels'? ".format(value.name) - ) + # TODO How to prompt the user, enter the in_channels. + # TODO Automatic shape inference when the user does not enter inchannels. + # if value._built is False: + # raise AttributeError( + # "The registered layer `{}` should be built in advance. " + # "Do you forget to pass the keyword argument 'in_channels'? ".format(value.name) + # ) layers[name] = value else: object.__setattr__(self, name, value) - def __call__(self, *inputs, **kwargs): + def __call__(self, inputs, *args, **kwargs): if BACKEND in ['tensorflow', 'dragon']: - output = self.forward(*inputs) + output = self.forward(inputs, *args, **kwargs) else: exit("Unsupported backend") + return output def forward(self, *inputs, **kwargs): @@ -155,140 +164,12 @@ def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), tr return weight def save_weights(self, file_path, format=None): - """Input file_path, save model weights into a file of given format. - Use self.load_weights() to restore. - - Parameters - ---------- - file_path : str - Filename to which the model weights will be saved. - format : str or None - Saved file format. - Value should be None, 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now. - 1) If this is set to None, then the postfix of file_path will be used to decide saved format. 
- If the postfix is not in ['h5', 'hdf5', 'npz', 'ckpt'], then file will be saved in hdf5 format by default. - 2) 'hdf5' will save model weights name in a list and each layer has its weights stored in a group of - the hdf5 file. - 3) 'npz' will save model weights sequentially into a npz file. - 4) 'npz_dict' will save model weights along with its name as a dict into a npz file. - 5) 'ckpt' will save model weights into a tensorflow ckpt file. - - Default None. - - Examples - -------- - 1) Save model weights in hdf5 format by default. - >>> net = vgg16() - >>> net.save_weights('./model.h5') - ... - >>> net.load_weights('./model.h5') - - 2) Save model weights in npz/npz_dict format - >>> net = vgg16() - >>> net.save_weights('./model.npz') - >>> net.save_weights('./model.npz', format='npz_dict') - - """ - - # self.all_weights = self.network.all_weights - if self.all_weights is None or len(self.all_weights) == 0: - logging.warning("Model contains no weights or layers haven't been built, nothing will be saved") - return - - if format is None: - postfix = file_path.split('.')[-1] - if postfix in ['h5', 'hdf5', 'npz', 'ckpt']: - format = postfix - else: - format = 'hdf5' - - if format == 'hdf5' or format == 'h5': - utils.save_weights_to_hdf5(file_path, self) - elif format == 'npz': - utils.save_npz(self.all_weights, file_path) - elif format == 'npz_dict': - utils.save_npz_dict(self.all_weights, file_path) - elif format == 'ckpt': - # TODO: enable this when tf save ckpt is enabled - raise NotImplementedError("ckpt load/save is not supported now.") - else: - raise ValueError( - "Save format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'." - "Other format is not supported now." - ) + """Input file_path, save model weights into a file of given format.""" + _save_weights(self, file_path, format) def load_weights(self, file_path, format=None, in_order=True, skip=False): - """Load model weights from a given file, which should be previously saved by self.save_weights(). - - Parameters - ---------- - file_path : str - Filename from which the model weights will be loaded. - format : str or None - If not specified (None), the postfix of the file_path will be used to decide its format. If specified, - value should be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now. - In addition, it should be the same format when you saved the file using self.save_weights(). - Default is None. - in_order : bool - Allow loading weights into model in a sequential way or by name. Only useful when 'format' is 'hdf5'. - If 'in_order' is True, weights from the file will be loaded into model in a sequential way. - If 'in_order' is False, weights from the file will be loaded into model by matching the name - with the weights of the model, particularly useful when trying to restore model in eager(graph) mode from - a weights file which is saved in graph(eager) mode. - Default is True. - skip : bool - Allow skipping weights whose name is mismatched between the file and model. Only useful when 'format' is - 'hdf5' or 'npz_dict'. If 'skip' is True, 'in_order' argument will be ignored and those loaded weights - whose name is not found in model weights (self.all_weights) will be skipped. If 'skip' is False, error will - occur when mismatch is found. - Default is False. - - Examples - -------- - 1) load model from a hdf5 file. 
- >>> net = vgg16() - >>> net.load_weights('./model_graph.h5', in_order=False, skip=True) # load weights by name, skipping mismatch - >>> net.load_weights('./model_eager.h5') # load sequentially - - 2) load model from a npz file - >>> net.load_weights('./model.npz') - - 2) load model from a npz file, which is saved as npz_dict previously - >>> net.load_weights('./model.npz', format='npz_dict') - - Notes - ------- - 1) 'in_order' is only useful when 'format' is 'hdf5'. If you are trying to load a weights file which is - saved in a different mode, it is recommended to set 'in_order' be True. - 2) 'skip' is useful when 'format' is 'hdf5' or 'npz_dict'. If 'skip' is True, - 'in_order' argument will be ignored. - - """ - if not os.path.exists(file_path): - raise FileNotFoundError("file {} doesn't exist.".format(file_path)) - - if format is None: - format = file_path.split('.')[-1] - - if format == 'hdf5' or format == 'h5': - if skip ==True or in_order == False: - # load by weights name - utils.load_hdf5_to_weights(file_path, self, skip) - else: - # load in order - utils.load_hdf5_to_weights_in_order(file_path, self) - elif format == 'npz': - utils.load_and_assign_npz(file_path, self) - elif format == 'npz_dict': - utils.load_and_assign_npz_dict(file_path, self, skip) - elif format == 'ckpt': - # TODO: enable this when tf save ckpt is enabled - raise NotImplementedError("ckpt load/save is not supported now.") - else: - raise ValueError( - "File format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. " - "Other format is not supported now." - ) + """Load model weights from a given file, which should be previously saved by self.save_weights().""" + _load_weights(self, file_path, format, in_order, skip) def _set_mode_for_layers(self, is_train): """Set all layers of this network to a given mode. @@ -304,10 +185,11 @@ def _set_mode_for_layers(self, is_train): if isinstance(layer, Module): layer.is_train = is_train + def set_train(self): """Set this network in training mode. After calling this method, all layers in network are in training mode, in particular, BatchNorm, Dropout, etc. - + TODO It is not possible to modify the parameter state after initialization, and a better way needs to be found. Examples -------- >>> import tensorlayer as tl @@ -319,10 +201,10 @@ def set_train(self): self.is_train = True self._set_mode_for_layers(True) - def eval(self): + def set_eval(self): """Set this network in evaluation mode. After calling this method, all layers in network are in evaluation mode, in particular, BatchNorm, Dropout, etc. - + TODO It is not possible to modify the parameter state after initialization, and a better way needs to be found. Examples -------- >>> import tensorlayer as tl @@ -335,6 +217,14 @@ def eval(self): self.is_train = False self._set_mode_for_layers(False) + def test(self): + """Set this network in evaluation mode.""" + self.eval() + + def infer(self): + """Set this network in evaluation mode.""" + self.eval() + @staticmethod def _compute_shape(tensors): if isinstance(tensors, list): @@ -358,7 +248,7 @@ def insert_param_to_layer(self, param_name, param, check_name=True): Raises: KeyError: If the name of parameter is null or contains dot. AttributeError: If user did not call init() first. - TypeError: If the type of parameter is not Parameter. + TypeError: If the type of parameter is not Parameter_. 
""" if not param_name: raise KeyError("The name of parameter should not be null.") @@ -502,6 +392,26 @@ def check_names(self): ) names.add(param.name) + def insert_child_to_layer(self, child_name, child): + """ + Adds a child layer to the current layer. + + Args: + child_name (str): Name of the child layer. + child (Module): The child layer to be inserted. + + Raises: + KeyError: Child Module's name is incorrect or duplicated with the other child name. + TypeError: Child Module's type is incorrect. + """ + if not child_name or '.' in child_name: + raise KeyError("Child layer name is incorrect.") + if hasattr(self, child_name) and child_name not in self._layers: + raise KeyError("Duplicate child name '{}'.".format(child_name)) + if not isinstance(child, Module) and child is not None: + raise TypeError("Child layer type is incorrect.") + self._layers[child_name] = child + def parameters_and_names(self, name_prefix='', expand=True): """ Returns an iterator over layer parameters. @@ -590,71 +500,258 @@ def name_layers(self): layers[name] = layer return layers + def init_build(self, *inputs, **kwargs): + """ + (1) This method must be called when the Layer has no input in_channels. + (2) Automatic shape inference when the user does not enter inchannels. + """ + + self.forward(*inputs, **kwargs) + + +class SequentialLayer(Module): + """ + Sequential layer container. + + A list of Layers will be added to it in the order they are passed in the constructor. + Alternatively, an ordered dict of layers can also be passed in. -class LayerNode(object): + Args: + args (list, OrderedDict): List of subclass of Module. + + Raises: + TypeError: If the type of the argument is not list or OrderedDict. + + Inputs: + - **input** (Tensor) - Tensor with shape according to the first Module in the sequence. + + Outputs: + Tensor, the output Tensor with shape depending on the input and defined sequence of Layers. 
+ + Examples: + >>> conv = tl.layers.Conv2d(3, 2, 3, pad_mode='valid') + >>> bn = tl.layers.BatchNorm2d(2) + >>> seq = tl.layers.SequentialLayer([conv, bn]) + >>> + >>> x = tl.layers.Input((1, 3, 4, 4)) + >>> seq(x) + """ + def __init__(self, *args): + super(SequentialLayer, self).__init__() + self._built = True + if len(args) == 1: + layers = args[0] + if isinstance(layers, list): + for index, layer in enumerate(layers): + self.insert_child_to_layer(str(index), layer) + elif isinstance(layers, OrderedDict): + for name, layer in layers.items(): + self.insert_child_to_layer(name, layer) + else: + raise TypeError('Layers must be list or orderedDict') + else: + for index, layer in enumerate(args): + self.insert_child_to_layer(str(index), layer) + self.layer_list = list(self._layers.values()) + + def __getitem__(self, index): + if isinstance(index, slice): + return self.__class__( + OrderedDict(list(self._layers.items())[index])) + index = self._valid_index(len(self), index) + return list(self._layers.values())[index] + + def __setitem__(self, index, layer): + if self._valid_module(layer): + index = self._valid_index(len(self), index) + key = list(self._layers.keys())[index] + self._layers[key] = layer + self.layer_list = list(self._layers.values()) + + def __delitem__(self, index): + if isinstance(index, int): + index = self._valid_index(len(self), index) + key = list(self._layers.keys())[index] + del self._layers[key] + elif isinstance(index, slice): + keys = list(self._layers.keys())[index] + for key in keys: + del self._layers[key] + else: + raise TypeError('Index {} is not int type or slice type'.format(index)) + self.layer_list = list(self._layers.values()) + + def __len__(self): + return len(self._layers) + + + def append(self, layer): + if self._valid_module(layer): + self._layers[str(len(self))] = layer + self.layer_list = list(self._layers.values()) + return self + + def build(self, inputs_shape): + pass + + def forward(self, input_data): + for layer in self.layer_list: + input_data = layer(input_data) + return input_data + + def _valid_index(self, layer_num, index): + if not isinstance(index, int): + raise TypeError("Index {} is not int type") + if not -layer_num <= index < layer_num: + raise IndexError("Index should be a number in range [{}, {}), but got {}" + .format(-layer_num, layer_num, index)) + return index % layer_num + + def _valid_module(self, layer): + if issubclass(layer.__class__, Module): + return True + raise TypeError('Module {} is not subclass of Module'.format(layer)) + + +class LayerList(Module): """ - The class :class:`LayerNode` class represents a conceptional node for a layer. + The class :class:`LayerList` is a linear stack of layers. - LayerNode is used for building static model and it is actually a light weighted - wrapper over Layer. Specifically, it is used for building static computational graph - (see _construct_graph() in tl.models.Model). In static model, each layer relates to - one or more LayerNode, and the connection relationship between layers is built upon - LayerNode. In addition, LayerNode eases layer reuse and weights sharing. + The :class:`LayerList` can be created by passing a list of layer instances. + The given layer instances will be automatically connected one by one. Parameters ---------- - layer : tl.layers.Layer - A tl layer that wants to create a node. - node_index : int - Index of this node in layer._nodes. - in_nodes :a list of LayerNode - Father nodes to this node. - in_tensors : a list of tensors - Input tensors to this node. 
- out_tensors : a list of tensors - Output tensors to this node. - in_tensor_idxes : a list of int - Indexes of each input tensor in its corresponding node's out_tensors. + layers: list of Layer + A list of layers. + name : str or None + A unique layer name. If None, a unique name will be automatically assigned. Methods --------- __init__() - Initializing the LayerNode. - __call__() - (1) Forwarding through the layer. (2) Update its input/output tensors. + Initializing the LayerList. + weights() + A collection of weights of all the layer instances. + build() + Build the LayerList. The layer instances will be connected automatically one by one. + forward() + Forward the computation. The computation will go through all layer instances. """ - def __init__(self, layer, node_index, in_nodes, in_tensors, out_tensors, in_tensor_idxes): + def __init__(self, layers, name=None): """ + Initializing the LayerList given a list of Layer. - Parameters - ---------- - layer - node_index - in_nodes - in_tensors - out_tensors - in_tensor_idxes + :param layers: list of Layer + :param name: str or None """ - self.layer = layer - self.node_index = node_index - self.in_nodes = in_nodes - self.out_nodes = [] - self.in_tensors = in_tensors - self.out_tensors = out_tensors - self.name = layer.name + "_node_{}".format(node_index) - self.in_tensors_idxes = in_tensor_idxes + super(LayerList, self).__init__(name=name) + self.layers = layers + is_built = True + for layer in self.layers: + self._trainable_weights.extend(layer.trainable_weights) + self._nontrainable_weights.extend(layer.nontrainable_weights) + if layer._built is False: + is_built = False + # if layer._built and layer.all_weights is not None: + # # some layers in the list passed in have already been built + # # e.g. using input shape to construct layers in dynamic eager + # if self._all_weights is None: + # self._all_weights = list() + # self._all_weights.extend(layer.all_weights) + if is_built: + self._built = True + + logging.info( + "LayerList %s including layers [%s]" % (self.name, ', '.join([layer.name for layer in self.layers])) + ) - self.visited = False + # check layer name uniqueness in LayerList + local_layer_name_set = set() + for layer in self.layers: + if layer.name not in local_layer_name_set: + local_layer_name_set.add(layer.name) + else: + raise ValueError( + 'Layer name \'%s\' has already been used by another layer. Please change the layer name.' % + layer.name + ) - def __call__(self, inputs, **kwargs): - """(1) Forwarding through the layer. (2) Update its input/output tensors.""" - outputs = self.layer.forward(inputs, **kwargs) - self.in_tensors = tolist(inputs) - self.out_tensors = tolist(outputs) - return self.out_tensors + def __getitem__(self, idx): + if isinstance(idx, slice): + return LayerList(list(self.layers)[idx]) + else: + return self.layers[idx] + + def __len__(self): + return len(self.layers) + + def __repr__(self): + tmpstr = 'LayerList' + '(\n' + for idx, layer in enumerate(self.layers): + modstr = layer.__repr__() + modstr = _addindent(modstr, 2) + tmpstr = tmpstr + ' (' + str(idx) + '): ' + modstr + '\n' + + tmpstr = tmpstr + ')' + return tmpstr + + @property + def trainable_weights(self): + return self._trainable_weights + @property + def nontrainable_weights(self): + return self._nontrainable_weights + + @property + def all_weights(self): + return self._trainable_weights + self._nontrainable_weights + + # def build(self, inputs_shape): + # """ + # Build the LayerList. 
The layer instances will be connected automatically one by one. + # """ + # in_tensor = self._input_tensors + # # in_layer = self._input_layer + # for layer in self.layers: + # is_build = layer._built + # out_tensor = layer(in_tensor) + # # nlayer = layer(in_layer) + # if is_build is False and layer.all_weights is not None: + # if self._all_weights is None: + # self._all_weights = list() + # self._all_weights.extend(layer.all_weights) + # layer._built = True + # in_tensor = out_tensor + # # in_layer = nlayer + + def forward(self, inputs): + """ + Forward the computation. The computation will go through all layer instances. + """ + z = inputs + for layer in self.layers: + z = layer.forward(z) + return z + + def _set_mode_for_layers(self, is_train): + """Set training/evaluation mode for all layer instances.""" + self.is_train = is_train + for layer in self.layers: + if isinstance(layer, LayerList): + layer._set_mode_for_layers(is_train) + else: + layer.is_train = is_train + + def get_args(self): + init_args = {} + layers = self.layer_args["layers"] + init_args["layers"] = [layer.config for layer in layers] + init_args.update({"layer_type": "layerlist"}) + return init_args def tolist(tensors): if isinstance(tensors, list) or isinstance(tensors, tuple): @@ -664,3 +761,14 @@ def tolist(tensors): return ntensors else: return [tensors] + +def _addindent(s_, numSpaces): + s = s_.split('\n') + # don't do anything for single-line stuff + if len(s) == 1: + return s_ + first = s.pop(0) + s = [(numSpaces * ' ') + line for line in s] + s = '\n'.join(s) + s = first + '\n' + s + return s \ No newline at end of file diff --git a/tensorlayer/layers/dense/__init__.py b/tensorlayer/layers/dense/__init__.py index c39d8f36b..2291b949e 100644 --- a/tensorlayer/layers/dense/__init__.py +++ b/tensorlayer/layers/dense/__init__.py @@ -10,19 +10,19 @@ """ from .base_dense import * -# from .binary_dense import * -# from .dorefa_dense import * +from .binary_dense import * +from .dorefa_dense import * from .dropconnect import * from .quan_dense import * -# from .quan_dense_bn import * -# from .ternary_dense import * +from .quan_dense_bn import * +from .ternary_dense import * __all__ = [ - # 'BinaryDense', + 'BinaryDense', 'Dense', - # 'DorefaDense', + 'DorefaDense', 'DropconnectDense', - # 'TernaryDense', + 'TernaryDense', 'QuanDense', - # 'QuanDenseWithBN', + 'QuanDenseWithBN', ] diff --git a/tensorlayer/layers/dense/base_dense.py b/tensorlayer/layers/dense/base_dense.py index 2092cf39e..a0470300b 100644 --- a/tensorlayer/layers/dense/base_dense.py +++ b/tensorlayer/layers/dense/base_dense.py @@ -108,6 +108,12 @@ def build(self, inputs_shape): self.matmul = tl.ops.MatMul() def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + z = self.matmul(inputs, self.W) if self.b_init_flag: z = self.bias_add(z, self.b) diff --git a/tensorlayer/layers/dense/binary_dense.py b/tensorlayer/layers/dense/binary_dense.py new file mode 100644 index 000000000..90c6e2b49 --- /dev/null +++ b/tensorlayer/layers/dense/binary_dense.py @@ -0,0 +1,109 @@ +#! 
/usr/bin/python +# -*- coding: utf-8 -*- + +import tensorlayer as tl +from tensorlayer import logging +from tensorlayer.layers.core import Module +from tensorlayer.layers.utils import quantize + +__all__ = [ + 'BinaryDense', +] + + +class BinaryDense(Module): + """The :class:`BinaryDense` class is a binary fully connected layer, which weights are either -1 or 1 while inferencing. + + Note that, the bias vector would not be binarized. + + Parameters + ---------- + n_units : int + The number of units of this layer. + act : activation function + The activation function of this layer, usually set to ``tf.act.sign`` or apply :class:`Sign` after :class:`BatchNorm`. + use_gemm : boolean + If True, use gemm instead of ``tf.matmul`` for inference. (TODO). + W_init : initializer + The initializer for the weight matrix. + b_init : initializer or None + The initializer for the bias vector. If None, skip biases. + in_channels: int + The number of channels of the previous layer. + If None, it will be automatically detected when the layer is forwarded for the first time. + name : None or str + A unique layer name. + + """ + + def __init__( + self, + n_units=100, + act=None, + use_gemm=False, + W_init=tl.initializers.truncated_normal(stddev=0.05), + b_init=tl.initializers.constant(value=0.0), + in_channels=None, + name=None, + ): + super().__init__(name, act=act) + self.n_units = n_units + self.use_gemm = use_gemm + self.W_init = W_init + self.b_init = b_init + self.in_channels = in_channels + + if self.in_channels is not None: + self.build((None, self.in_channels)) + self._built = True + + logging.info( + "BinaryDense %s: %d %s" % + (self.name, n_units, self.act.__class__.__name__ if self.act is not None else 'No Activation') + ) + + def __repr__(self): + actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' + s = ('{classname}(n_units={n_units}, ' + actstr) + if self.in_channels is not None: + s += ', in_channels=\'{in_channels}\'' + if self.name is not None: + s += ', name=\'{name}\'' + s += ')' + return s.format(classname=self.__class__.__name__, **self.__dict__) + + def build(self, inputs_shape): + if len(inputs_shape) != 2: + raise Exception("The input dimension must be rank 2, please reshape or flatten it") + + if self.in_channels is None: + self.in_channels = inputs_shape[1] + + if self.use_gemm: + raise Exception("TODO. The current version use tf.matmul for inferencing.") + + n_in = inputs_shape[-1] + self.W = self._get_weights("weights", shape=(n_in, self.n_units), init=self.W_init) + if self.b_init is not None: + self.b = self._get_weights("biases", shape=(self.n_units), init=self.b_init) + self.bias_add = tl.ops.BiasAdd() + + self.matmul = tl.ops.MatMul() + + + def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + + W_ = quantize(self.W) + outputs = self.matmul(inputs, W_) + + if self.b_init is not None: + outputs = self.bias_add(outputs, self.b) + + if self.act: + outputs = self.act(outputs) + return outputs diff --git a/tensorlayer/layers/dense/dorefa_dense.py b/tensorlayer/layers/dense/dorefa_dense.py new file mode 100644 index 000000000..bf35c14d4 --- /dev/null +++ b/tensorlayer/layers/dense/dorefa_dense.py @@ -0,0 +1,116 @@ +#! 
/usr/bin/python +# -*- coding: utf-8 -*- + +import tensorlayer as tl +from tensorlayer import logging +from tensorlayer.layers.core import Module +from tensorlayer.layers.utils import cabs, quantize_active, quantize_weight + +__all__ = [ + 'DorefaDense', +] + + +class DorefaDense(Module): + """The :class:`DorefaDense` class is a binary fully connected layer, which weights are 'bitW' bits and the output of the previous layer + are 'bitA' bits while inferencing. + + Note that, the bias vector would not be binarized. + + Parameters + ---------- + bitW : int + The bits of this layer's parameter + bitA : int + The bits of the output of previous layer + n_units : int + The number of units of this layer. + act : activation function + The activation function of this layer, usually set to ``tf.act.sign`` or apply :class:`Sign` after :class:`BatchNorm`. + use_gemm : boolean + If True, use gemm instead of ``tf.matmul`` for inferencing. (TODO). + W_init : initializer + The initializer for the weight matrix. + b_init : initializer or None + The initializer for the bias vector. If None, skip biases. + in_channels: int + The number of channels of the previous layer. + If None, it will be automatically detected when the layer is forwarded for the first time. + name : a str + A unique layer name. + + """ + + def __init__( + self, + bitW=1, + bitA=3, + n_units=100, + act=None, + use_gemm=False, + W_init=tl.initializers.truncated_normal(stddev=0.05), + b_init=tl.initializers.constant(value=0.0), + in_channels=None, + name=None, #'dorefa_dense', + ): + super().__init__(name, act=act) + self.bitW = bitW + self.bitA = bitA + self.n_units = n_units + self.use_gemm = use_gemm + self.W_init = W_init + self.b_init = b_init + self.in_channels = in_channels + + if self.in_channels is not None: + self.build((None, self.in_channels)) + self._built = True + + logging.info( + "DorefaDense %s: %d %s" % + (self.name, n_units, self.act.__name__ if self.act is not None else 'No Activation') + ) + + def __repr__(self): + actstr = self.act.__name__ if self.act is not None else 'No Activation' + s = ('{classname}(n_units={n_units}, ' + actstr) + s += ', bitW={bitW}, bitA={bitA}' + if self.in_channels is not None: + s += ', in_channels=\'{in_channels}\'' + if self.name is not None: + s += ', name=\'{name}\'' + s += ')' + return s.format(classname=self.__class__.__name__, **self.__dict__) + + def build(self, inputs_shape): + if len(inputs_shape) != 2: + raise Exception("The input dimension must be rank 2, please reshape or flatten it") + + if self.in_channels is None: + self.in_channels = inputs_shape[1] + + if self.use_gemm: + raise Exception("TODO. 
+        n_in = inputs_shape[-1]
+        self.W = self._get_weights("weights", shape=(n_in, self.n_units), init=self.W_init)
+        if self.b_init is not None:
+            self.b = self._get_weights("biases", shape=(self.n_units, ), init=self.b_init)
+            self.bias_add = tl.ops.BiasAdd()
+        self.matmul = tl.ops.MatMul()
+
+    def forward(self, inputs):
+        if self._forward_state == False:
+            if self._built == False:
+                self.build(tl.get_tensor_shape(inputs))
+                self._built = True
+            self._forward_state = True
+
+        inputs = quantize_active(cabs(inputs), self.bitA)
+        W_ = quantize_weight(self.W, self.bitW)
+        outputs = self.matmul(inputs, W_)
+        if self.b_init is not None:
+            outputs = self.bias_add(outputs, self.b)
+        if self.act:
+            outputs = self.act(outputs)
+        return outputs
\ No newline at end of file
diff --git a/tensorlayer/layers/dense/dropconnect.py b/tensorlayer/layers/dense/dropconnect.py
index b28e73af5..178ea2cb7 100644
--- a/tensorlayer/layers/dense/dropconnect.py
+++ b/tensorlayer/layers/dense/dropconnect.py
@@ -40,9 +40,9 @@ class DropconnectDense(Module):
     --------
     >>> net = tl.layers.Input([None, 784], name='input')
     >>> net = tl.layers.DropconnectDense(keep=0.8,
-    ...                 n_units=800, act=tf.nn.relu, name='relu1')(net)
+    ...                 n_units=800, act=tl.ReLU, name='relu1')(net)
     >>> net = tl.layers.DropconnectDense(keep=0.5,
-    ...                 n_units=800, act=tf.nn.relu, name='relu2')(net)
+    ...                 n_units=800, act=tl.ReLU, name='relu2')(net)
     >>> net = tl.layers.DropconnectDense(keep=0.5,
     ...                 n_units=10, name='output')(net)
@@ -110,6 +110,12 @@ def build(self, inputs_shape):
         self.bias_add = tl.ops.BiasAdd()
 
     def forward(self, inputs):
+        if self._forward_state == False:
+            if self._built == False:
+                self.build(tl.get_tensor_shape(inputs))
+                self._built = True
+            self._forward_state = True
+
         W_dropcon = self.dropout(self.W)
         outputs = self.matmul(inputs, W_dropcon)
         if self.b_init:
diff --git a/tensorlayer/layers/dense/quan_dense.py b/tensorlayer/layers/dense/quan_dense.py
index 062858a5f..460402398 100644
--- a/tensorlayer/layers/dense/quan_dense.py
+++ b/tensorlayer/layers/dense/quan_dense.py
@@ -99,6 +99,11 @@ def build(self, inputs_shape):
         self.matmul = tl.ops.MatMul()
 
     def forward(self, inputs):
+        if self._forward_state == False:
+            if self._built == False:
+                self.build(tl.get_tensor_shape(inputs))
+                self._built = True
+            self._forward_state = True
 
         inputs = quantize_active_overflow(inputs, self.bitA)
 
diff --git a/tensorlayer/layers/dense/quan_dense_bn.py b/tensorlayer/layers/dense/quan_dense_bn.py
new file mode 100644
index 000000000..3f811a2a7
--- /dev/null
+++ b/tensorlayer/layers/dense/quan_dense_bn.py
@@ -0,0 +1,188 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+
+
+import tensorlayer as tl
+from tensorlayer import logging
+from tensorlayer.layers.core import Module
+from tensorflow.python.training import moving_averages
+from tensorlayer.layers.utils import (quantize_active_overflow, quantize_weight_overflow,
+                                      mean_var_with_update, w_fold, bias_fold)
+
+__all__ = [
+    'QuanDenseWithBN',
+]
+
+
+class QuanDenseWithBN(Module):
+    """The :class:`QuanDenseWithBN` class is a quantized fully connected layer with BN, whose weights are 'bitW' bits and whose input
+    (the output of the previous layer) is 'bitA' bits during inference.
+    # TODO The QuanDenseWithBN only supports TensorFlow backend.
+    Parameters
+    ----------
+    n_units : int
+        The number of units of this layer.
+    act : activation function
+        The activation function of this layer.
+    decay : float
+        A decay factor for `ExponentialMovingAverage`.
+        A large value is suggested for large datasets.
+    epsilon : float
+        Epsilon.
+    is_train : boolean
+        Whether the layer is being used for training or inference.
+    beta_init : initializer or None
+        The initializer for initializing beta, if None, skip beta.
+        Usually you should not skip beta unless you know what happened.
+    gamma_init : initializer or None
+        The initializer for initializing gamma, if None, skip gamma.
+    bitW : int
+        The number of bits of this layer's parameters.
+    bitA : int
+        The number of bits of the output of the previous layer.
+    use_gemm : boolean
+        If True, use gemm instead of ``tf.matmul`` for inference. (TODO).
+    W_init : initializer
+        The initializer for the weight matrix.
+    W_init_args : dictionary
+        The arguments for the weight matrix initializer.
+    in_channels: int
+        The number of channels of the previous layer.
+        If None, it will be automatically detected when the layer is forwarded for the first time.
+    name : None or str
+        A unique layer name.
+
+    Examples
+    ---------
+    >>> import tensorlayer as tl
+    >>> net = tl.layers.Input([50, 256])
+    >>> net = tl.layers.QuanDenseWithBN(128, act='relu', name='qdbn1')(net)
+    >>> net = tl.layers.QuanDenseWithBN(256, act='relu', name='qdbn2')(net)
+    """
+
+    def __init__(
+        self,
+        n_units=100,
+        act=None,
+        decay=0.9,
+        epsilon=1e-5,
+        is_train=False,
+        bitW=8,
+        bitA=8,
+        gamma_init=tl.initializers.truncated_normal(stddev=0.05),
+        beta_init=tl.initializers.truncated_normal(stddev=0.05),
+        use_gemm=False,
+        W_init=tl.initializers.truncated_normal(stddev=0.05),
+        W_init_args=None,
+        in_channels=None,
+        name=None,  # 'quan_dense_with_bn',
+    ):
+        super(QuanDenseWithBN, self).__init__(act=act, W_init_args=W_init_args, name=name)
+        self.n_units = n_units
+        self.decay = decay
+        self.epsilon = epsilon
+        self.is_train = is_train
+        self.bitW = bitW
+        self.bitA = bitA
+        self.gamma_init = gamma_init
+        self.beta_init = beta_init
+        self.use_gemm = use_gemm
+        self.W_init = W_init
+        self.in_channels = in_channels
+
+        if self.in_channels is not None:
+            self.build((None, self.in_channels))
+            self._built = True
+
+        logging.info(
+            "QuanDenseLayerWithBN %s: %d %s" %
+            (self.name, n_units, self.act.__class__.__name__ if self.act is not None else 'No Activation')
+        )
+
+    def __repr__(self):
+        actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
+        s = ('{classname}(n_units={n_units}, ' + actstr)
+        s += ', bitW={bitW}, bitA={bitA}'
+        if self.in_channels is not None:
+            s += ', in_channels=\'{in_channels}\''
+        if self.name is not None:
+            s += ', name=\'{name}\''
+        s += ')'
+        return s.format(classname=self.__class__.__name__, **self.__dict__)
+
+    def build(self, inputs_shape):
+        if self.in_channels is None and len(inputs_shape) != 2:
+            raise Exception("The input dimension must be rank 2, please reshape or flatten it")
+
+        if self.in_channels is None:
+            self.in_channels = inputs_shape[1]
+
+        if self.use_gemm:
+            raise Exception("TODO. The current version uses tf.matmul for inference.")
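+
+        # Note: gamma/beta and the moving statistics created below are not applied
+        # as a separate BN op; forward() folds them into the quantized weights and
+        # bias via w_fold() / bias_fold().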
+        n_in = inputs_shape[-1]
+        self.W = self._get_weights("weights", shape=(n_in, self.n_units), init=self.W_init)
+
+        para_bn_shape = (self.n_units, )
+        if self.gamma_init:
+            self.scale_para = self._get_weights("gamma_weights", shape=para_bn_shape, init=self.gamma_init)
+        else:
+            self.scale_para = None
+
+        if self.beta_init:
+            self.offset_para = self._get_weights("beta_weights", shape=para_bn_shape, init=self.beta_init)
+        else:
+            self.offset_para = None
+
+        self.moving_mean = self._get_weights(
+            "moving_mean", shape=para_bn_shape, init=tl.initializers.constant(1.0), trainable=False
+        )
+        self.moving_variance = self._get_weights(
+            "moving_variance", shape=para_bn_shape, init=tl.initializers.constant(1.0), trainable=False
+        )
+
+    def forward(self, inputs):
+        if self._forward_state == False:
+            if self._built == False:
+                self.build(tl.get_tensor_shape(inputs))
+                self._built = True
+            self._forward_state = True
+
+        x = inputs
+        inputs = quantize_active_overflow(inputs, self.bitA)
+        mid_out = tl.ops.matmul(x, self.W)
+
+        mean, variance = tl.ops.moments(x=mid_out, axes=list(range(len(mid_out.get_shape()) - 1)))
+
+        update_moving_mean = moving_averages.assign_moving_average(
+            self.moving_mean, mean, self.decay, zero_debias=False
+        )  # if zero_debias=True, has bias
+
+        update_moving_variance = moving_averages.assign_moving_average(
+            self.moving_variance, variance, self.decay, zero_debias=False
+        )  # if zero_debias=True, has bias
+
+        if self.is_train:
+            mean, var = mean_var_with_update(update_moving_mean, update_moving_variance, mean, variance)
+        else:
+            mean, var = self.moving_mean, self.moving_variance
+
+        _w_fold = w_fold(self.W, self.scale_para, var, self.epsilon)
+
+        W = quantize_weight_overflow(_w_fold, self.bitW)
+
+        outputs = tl.ops.matmul(inputs, W)
+
+        if self.beta_init:
+            _bias_fold = bias_fold(self.offset_para, self.scale_para, mean, var, self.epsilon)
+            outputs = tl.ops.bias_add(outputs, _bias_fold)
+
+        if self.act:
+            outputs = self.act(outputs)
+        return outputs
diff --git a/tensorlayer/layers/dense/ternary_dense.py b/tensorlayer/layers/dense/ternary_dense.py
new file mode 100644
index 000000000..5cf4457e5
--- /dev/null
+++ b/tensorlayer/layers/dense/ternary_dense.py
@@ -0,0 +1,109 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import tensorlayer as tl
+from tensorlayer import logging
+from tensorlayer.layers.core import Module
+from tensorlayer.layers.utils import compute_alpha, ternary_operation
+
+__all__ = [
+    'TernaryDense',
+]
+
+
+class TernaryDense(Module):
+    """The :class:`TernaryDense` class is a ternary fully connected layer, whose weights are either -1, 0 or 1 during inference.
+    # TODO The TernaryDense only supports TensorFlow backend.
+
+    Note that the bias vector is not ternarized.
+
+    Parameters
+    ----------
+    n_units : int
+        The number of units of this layer.
+    act : activation function
+        The activation function of this layer, usually set to ``tl.act.sign`` or apply :class:`SignLayer` after :class:`BatchNormLayer`.
+    use_gemm : boolean
+        If True, use gemm instead of ``tf.matmul`` for inference. (TODO).
+    W_init : initializer
+        The initializer for the weight matrix.
+    b_init : initializer or None
+        The initializer for the bias vector. If None, skip biases.
+    in_channels: int
+        The number of channels of the previous layer.
+        If None, it will be automatically detected when the layer is forwarded for the first time.
+    name : None or str
+        A unique layer name.
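+
+    Examples
+    --------
+    A minimal usage sketch (shapes and layer names are illustrative):
+
+    >>> net = tl.layers.Input([10, 784], name='input')
+    >>> net = tl.layers.TernaryDense(n_units=800, act=tl.ReLU, name='tdense1')(net)
+    >>> net = tl.layers.TernaryDense(n_units=10, name='output')(net)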
+
+    """
+
+    def __init__(
+        self,
+        n_units=100,
+        act=None,
+        use_gemm=False,
+        W_init=tl.initializers.truncated_normal(stddev=0.05),
+        b_init=tl.initializers.constant(value=0.0),
+        in_channels=None,
+        name=None,  # 'ternary_dense',
+    ):
+        super().__init__(name, act=act)
+        self.n_units = n_units
+        self.use_gemm = use_gemm
+        self.W_init = W_init
+        self.b_init = b_init
+        self.in_channels = in_channels
+
+        if self.in_channels is not None:
+            self.build((None, self.in_channels))
+            self._built = True
+
+        logging.info(
+            "TernaryDense %s: %d %s" %
+            (self.name, n_units, self.act.__class__.__name__ if self.act is not None else 'No Activation')
+        )
+
+    def __repr__(self):
+        actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
+        s = ('{classname}(n_units={n_units}, ' + actstr)
+        if self.in_channels is not None:
+            s += ', in_channels=\'{in_channels}\''
+        if self.name is not None:
+            s += ', name=\'{name}\''
+        s += ')'
+        return s.format(classname=self.__class__.__name__, **self.__dict__)
+
+    def build(self, inputs_shape):
+        if len(inputs_shape) != 2:
+            raise Exception("The input dimension must be rank 2, please reshape or flatten it")
+
+        if self.in_channels is None:
+            self.in_channels = inputs_shape[1]
+
+        if self.use_gemm:
+            raise Exception("TODO. The current version uses tf.matmul for inference.")
+
+        n_in = inputs_shape[-1]
+
+        self.W = self._get_weights(var_name="weights", shape=(n_in, self.n_units), init=self.W_init)
+        if self.b_init is not None:
+            self.b = self._get_weights(var_name="biases", shape=(self.n_units, ), init=self.b_init)
+
+    def forward(self, inputs):
+        if self._forward_state == False:
+            if self._built == False:
+                self.build(tl.get_tensor_shape(inputs))
+                self._built = True
+            self._forward_state = True
+
+        alpha = compute_alpha(self.W)
+        W_ = ternary_operation(self.W)
+        W_ = tl.ops.multiply(alpha, W_)
+
+        outputs = tl.ops.matmul(inputs, W_)
+
+        if self.b_init is not None:
+            outputs = tl.ops.bias_add(outputs, self.b, name='bias_add')
+        if self.act:
+            outputs = self.act(outputs)
+        return outputs
diff --git a/tensorlayer/layers/dropout.py b/tensorlayer/layers/dropout.py
index 61cc881d4..8dccda605 100644
--- a/tensorlayer/layers/dropout.py
+++ b/tensorlayer/layers/dropout.py
@@ -54,13 +54,3 @@ def forward(self, inputs):
         else:
             outputs = inputs
         return outputs
-
-
-if __name__ == '__main__':
-    shapes_do = (20, 16, 50)
-    from tensorlayer.layers.inputs import Input
-    # from mindspore import context
-    # context.set_context(mode=context.GRAPH_MODE)
-    inputs_do = Input(shapes_do)
-    dropout = Dropout(keep=0.1)(inputs_do)
-    print(dropout)
diff --git a/tensorlayer/layers/embedding.py b/tensorlayer/layers/embedding.py
index e8d1e3748..a6b431368 100644
--- a/tensorlayer/layers/embedding.py
+++ b/tensorlayer/layers/embedding.py
@@ -253,7 +253,9 @@ def build(self, inputs_shape):
         )
 
         self.embedding_lookup = tl.EmbeddingLookup()
-        self.nce_loss = tl.NCELoss(**self.nce_loss_args)
+
+        if self.activate_nce_loss:
+            self.nce_loss = tl.NCELoss(**self.nce_loss_args)
 
     def forward(self, inputs, use_nce_loss=None):
         """
@@ -504,25 +506,12 @@ def forward(self, inputs):
 
         # Count number of non-padding words in each sentence
         sentence_lengths = self.count_nonzero(masks, axis=1)
 
-        sentence_embeddings = tf.divide(
+        sentence_embeddings = tl.ops.divide(
             sum_word_embeddings,
             sentence_lengths + 1e-8,  # Add epsilon to avoid dividing by 0
-            name='sentence_embeddings'
         )
 
         outputs = sentence_embeddings
         return outputs
-
-
-if __name__ == '__main__':
-    import tensorflow as tf
-    import 
tensorlayer as tl - batch_size = 8 - length = 5 - input = tl.layers.Input([batch_size, length], dtype=tl.int32) - avgembed = AverageEmbedding(vocabulary_size=1000, embedding_size=50, name='avg') - print(avgembed) - AverageEmbedding(vocabulary_size=1000, embedding_size=50, pad_value=0) - tensor = avgembed(input) - print(tensor) diff --git a/tensorlayer/layers/image_resampling.py b/tensorlayer/layers/image_resampling.py index a7bdbf835..a676a34ac 100644 --- a/tensorlayer/layers/image_resampling.py +++ b/tensorlayer/layers/image_resampling.py @@ -43,7 +43,7 @@ class UpSampling2d(Module): """ - def __init__(self, scale, method='bilinear', antialias=False, data_format='channel_last', name=None, ksize=None): + def __init__(self, scale, method='bilinear', antialias=False, data_format='channels_last', name=None, ksize=None): super(UpSampling2d, self).__init__(name) self.method = method self.antialias = antialias @@ -85,13 +85,6 @@ def forward(self, inputs): outputs = self.resize(inputs) return outputs - -if __name__ == '__main__': - ni = tl.layers.Input([10, 32, 50, 50], name='input') - y = UpSampling2d(scale=(2, 2), data_format='channels_first', ksize=(50, 50))(ni) - print(y) - - class DownSampling2d(Module): """The :class:`DownSampling2d` class is down-sampling 2D layer. @@ -129,26 +122,27 @@ def __init__( scale, method='bilinear', antialias=False, - data_format='channel_last', + data_format='channels_last', name=None, + ksize=None ): super(DownSampling2d, self).__init__(name) self.method = method self.antialias = antialias self.data_format = data_format - + self.ksize = ksize logging.info( "DownSampling2d %s: scale: %s method: %s antialias: %s" % (self.name, scale, self.method, self.antialias) ) - self.build(None) - self._built = True - if isinstance(scale, (list, tuple)) and len(scale) != 2: raise ValueError("scale must be int or tuple/list of length 2") self.scale = (scale, scale) if isinstance(scale, int) else scale + self.build(None) + self._built = True + def __repr__(self): s = '{classname}(scale={scale}, method={method}' if self.name is not None: @@ -157,8 +151,11 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, scale=self.scale, method=self.method, name=self.name) def build(self, inputs_shape): - if self.data_format != 'channel_last': - raise Exception("DownSampling2d tf.image.resize_images only support channel_last") + scale = [1.0 / self.scale[0], 1.0 / self.scale[1]] + self.resize = tl.ops.Resize( + scale=scale, method=self.method, antialias=self.antialias, data_format=self.data_format, + ksize=self.ksize + ) def forward(self, inputs): """ @@ -168,6 +165,6 @@ def forward(self, inputs): inputs : :class:`Tensor` Inputs tensors with 4-D Tensor of the shape (batch, height, width, channels) """ - output_size = [int(inputs.shape[1] * 1.0 / self.scale[0]), int(inputs.shape[2] * 1.0 / self.scale[1])] - outputs = tl.ops.resize(inputs, output_size=output_size, method=self.method, antialias=self.antialias) - return outputs + + outputs = self.resize(inputs) + return outputs \ No newline at end of file diff --git a/tensorlayer/layers/inputs.py b/tensorlayer/layers/inputs.py index 80a7d0c62..44202584e 100644 --- a/tensorlayer/layers/inputs.py +++ b/tensorlayer/layers/inputs.py @@ -3,7 +3,7 @@ import tensorlayer as tl from tensorlayer import logging -from tensorlayer.layers.core import Module, LayerNode +from tensorlayer.layers.core import Module __all__ = ['Input', '_InputLayer'] @@ -39,7 +39,7 @@ def __init__(self, shape, dtype=tl.float32, name=None): self.dtype = dtype 
self.shape_without_none = [_ if _ is not None else 1 for _ in shape] self.outputs = tl.initializers.ones()(self.shape_without_none, dtype=self.dtype) - # self._built = True + self._built = True # self._add_node(outputs, outputs) def __repr__(self): diff --git a/tensorlayer/layers/lambda_layers.py b/tensorlayer/layers/lambda_layers.py new file mode 100644 index 000000000..1184f2925 --- /dev/null +++ b/tensorlayer/layers/lambda_layers.py @@ -0,0 +1,280 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import tensorflow as tf + +from tensorlayer import logging +from tensorlayer.files import utils +from tensorlayer.layers.core import Module + +__all__ = [ + 'Lambda', + 'ElementwiseLambda', +] + + +class Lambda(Module): + """A layer that takes a user-defined function using Lambda. + If the function has trainable weights, the weights should be provided. + Remember to make sure the weights provided when the layer is constructed are SAME as + the weights used when the layer is forwarded. + For multiple inputs see :class:`ElementwiseLambda`. + + Parameters + ---------- + fn : function + The function that applies to the inputs (e.g. tensor from the previous layer). + fn_weights : list + The trainable weights for the function if any. Optional. + fn_args : dict + The arguments for the function if any. Optional. + name : str or None + A unique layer name. + + Examples + --------- + Non-parametric and non-args case: + This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional). + + >>> x = tl.layers.Input([8, 3], name='input') + >>> y = tl.layers.Lambda(lambda x: 2*x, name='lambda')(x) + + + Non-parametric and with args case: + This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional). + + >>> def customize_func(x, foo=42): # x is the inputs, foo is an argument + >>> return foo * x + >>> x = tl.layers.Input([8, 3], name='input') + >>> lambdalayer = tl.layers.Lambda(customize_func, fn_args={'foo': 2}, name='lambda')(x) + + + Any function with outside variables: + This case has not been supported in Model.save() / Model.load() yet. + Please avoid using Model.save() / Model.load() to save / load models that contain such Lambda layer. Instead, you may use Model.save_weights() / Model.load_weights() to save / load model weights. + Note: In this case, fn_weights should be a list, and then the trainable weights in this Lambda layer can be added into the weights of the whole model. + + >>> a = tf.Variable(1.0) + >>> def func(x): + >>> return x + a + >>> x = tl.layers.Input([8, 3], name='input') + >>> y = tl.layers.Lambda(func, fn_weights=[a], name='lambda')(x) + + + Parametric case, merge other wrappers into TensorLayer: + This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional). 
+ + >>> layers = [ + >>> tf.keras.layers.Dense(10, activation=tf.nn.relu), + >>> tf.keras.layers.Dense(5, activation=tf.nn.sigmoid), + >>> tf.keras.layers.Dense(1, activation=tf.identity) + >>> ] + >>> perceptron = tf.keras.Sequential(layers) + >>> # in order to compile keras model and get trainable_variables of the keras model + >>> _ = perceptron(np.random.random([100, 5]).astype(np.float32)) + >>> + >>> class CustomizeModel(tl.models.Model): + >>> def __init__(self): + >>> super(CustomizeModel, self).__init__() + >>> self.dense = tl.layers.Dense(in_channels=1, n_units=5) + >>> self.lambdalayer = tl.layers.Lambda(perceptron, perceptron.trainable_variables) + >>> + >>> def forward(self, x): + >>> z = self.dense(x) + >>> z = self.lambdalayer(z) + >>> return z + >>> + >>> optimizer = tl.optimizers.Adam(learning_rate=0.1) + >>> model = CustomizeModel() + >>> model.train() + >>> + >>> for epoch in range(50): + >>> with tf.GradientTape() as tape: + >>> pred_y = model(data_x) + >>> loss = tl.cost.mean_squared_error(pred_y, data_y) + >>> + >>> gradients = tape.gradient(loss, model.trainable_weights) + >>> optimizer.apply_gradients(zip(gradients, model.trainable_weights)) + + """ + + def __init__( + self, + fn, + fn_weights=None, + fn_args=None, + name=None, + ): + + super(Lambda, self).__init__(name=name) + self.fn = fn + self._trainable_weights = fn_weights if fn_weights is not None else [] + self.fn_args = fn_args if fn_args is not None else {} + + try: + fn_name = repr(self.fn) + except: + fn_name = 'name not available' + logging.info("Lambda %s: func: %s, len_weights: %s" % (self.name, fn_name, len(self._trainable_weights))) + + self.build() + self._built = True + + def __repr__(self): + s = '{classname}(' + s += 'fn={fn_name},' + s += 'len_weights={len_weights},' + s += 'name=\'{name}\'' + s += ')' + try: + fn_name = repr(self.fn) + except: + fn_name = 'name not available' + return s.format( + classname=self.__class__.__name__, fn_name=fn_name, len_weights=len(self._trainable_weights), + **self.__dict__ + ) + + def build(self, inputs_shape=None): + pass + + def forward(self, inputs, **kwargs): + + if len(kwargs) == 0: + outputs = self.fn(inputs, **self.fn_args) + else: + outputs = self.fn(inputs, **kwargs) + + return outputs + + def get_args(self): + init_args = {} + if isinstance(self.fn, tf.keras.layers.Layer) or isinstance(self.fn, tf.keras.Model): + init_args.update({"layer_type": "keraslayer"}) + init_args["fn"] = utils.save_keras_model(self.fn) + init_args["fn_weights"] = None + if len(self._nodes) == 0: + init_args["keras_input_shape"] = [] + else: + init_args["keras_input_shape"] = self._nodes[0].in_tensors[0].get_shape().as_list() + else: + init_args = {"layer_type": "normal"} + return init_args + + +class ElementwiseLambda(Module): + """A layer that use a custom function to combine multiple :class:`Layer` inputs. + If the function has trainable weights, the weights should be provided. + Remember to make sure the weights provided when the layer is constructed are SAME as + the weights used when the layer is forwarded. + + Parameters + ---------- + fn : function + The function that applies to the inputs (e.g. tensor from the previous layer). + fn_weights : list + The trainable weights for the function if any. Optional. + fn_args : dict + The arguments for the function if any. Optional. + name : str or None + A unique layer name. 
+ + Examples + -------- + + Non-parametric and with args case + This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional). + + >>> # z = mean + noise * tf.exp(std * 0.5) + foo + >>> def func(noise, mean, std, foo=42): + >>> return mean + noise * tf.exp(std * 0.5) + foo + >>> noise = tl.layers.Input([100, 1]) + >>> mean = tl.layers.Input([100, 1]) + >>> std = tl.layers.Input([100, 1]) + >>> out = tl.layers.ElementwiseLambda(fn=func, fn_args={'foo': 84}, name='elementwiselambda')([noise, mean, std]) + + + Non-parametric and non-args case + This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional). + + >>> # z = mean + noise * tf.exp(std * 0.5) + >>> noise = tl.layers.Input([100, 1]) + >>> mean = tl.layers.Input([100, 1]) + >>> std = tl.layers.Input([100, 1]) + >>> out = tl.layers.ElementwiseLambda(fn=lambda x, y, z: x + y * tf.exp(z * 0.5), name='elementwiselambda')([noise, mean, std]) + + + Any function with outside variables + This case has not been supported in Model.save() / Model.load() yet. + Please avoid using Model.save() / Model.load() to save / load models that contain such ElementwiseLambda layer. Instead, you may use Model.save_weights() / Model.load_weights() to save / load model weights. + Note: In this case, fn_weights should be a list, and then the trainable weights in this ElementwiseLambda layer can be added into the weights of the whole model. + + >>> # z = mean + noise * tf.exp(std * 0.5) + vara + >>> vara = [tf.Variable(1.0)] + >>> def func(noise, mean, std): + >>> return mean + noise * tf.exp(std * 0.5) + vara + >>> noise = tl.layers.Input([100, 1]) + >>> mean = tl.layers.Input([100, 1]) + >>> std = tl.layers.Input([100, 1]) + >>> out = tl.layers.ElementwiseLambda(fn=func, fn_weights=vara, name='elementwiselambda')([noise, mean, std]) + + """ + + def __init__( + self, + fn, + fn_weights=None, + fn_args=None, + name=None, #'elementwiselambda', + ): + + super(ElementwiseLambda, self).__init__(name=name) + self.fn = fn + self._trainable_weights = fn_weights if fn_weights is not None else [] + self.fn_args = fn_args if fn_args is not None else {} + + try: + fn_name = repr(self.fn) + except: + fn_name = 'name not available' + logging.info( + "ElementwiseLambda %s: func: %s, len_weights: %s" % (self.name, fn_name, len(self._trainable_weights)) + ) + + self.build() + self._built = True + + def __repr__(self): + s = '{classname}(' + s += 'fn={fn_name},' + s += 'len_weights={len_weights},' + s += 'name=\'{name}\'' + s += ')' + try: + fn_name = repr(self.fn) + except: + fn_name = 'name not available' + return s.format( + classname=self.__class__.__name__, fn_name=fn_name, len_weights=len(self._trainable_weights), + **self.__dict__ + ) + + def build(self, inputs_shape=None): + # do nothing + # the weights of the function are provided when the Lambda layer is constructed + pass + + # @tf.function + def forward(self, inputs, **kwargs): + + if not isinstance(inputs, list): + raise TypeError( + "The inputs should be a list of values which corresponds with the customised lambda function." 
+            )
+
+        if len(kwargs) == 0:
+            outputs = self.fn(*inputs, **self.fn_args)
+        else:
+            outputs = self.fn(*inputs, **kwargs)
+
+        return outputs
\ No newline at end of file
diff --git a/tensorlayer/layers/merge.py b/tensorlayer/layers/merge.py
index ff26621ae..8e41d5a97 100644
--- a/tensorlayer/layers/merge.py
+++ b/tensorlayer/layers/merge.py
@@ -70,7 +70,6 @@ def forward(self, inputs):
         outputs = self.concat(inputs)
         return outputs
 
-
 class Elementwise(Module):
     """A layer that combines multiple :class:`Layer` that have the same output shapes according to an element-wise operation.
@@ -112,6 +111,7 @@ def __init__(
 
         super(Elementwise, self).__init__(name, act=act)
         self.combine_fn = combine_fn
+        self.combine_fn_str = str(combine_fn).split(' ')[1]
 
         self.build(None)
         self._built = True
@@ -122,8 +122,8 @@ def __init__(
         )
 
     def __repr__(self):
-        actstr = self.act.__name__ if self.act is not None else 'No Activation'
-        s = ('{classname}(combine_fn={combine_fn}, ' + actstr)
+        actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
+        s = ('{classname}(combine_fn={combine_fn_str}, ' + actstr)
         if self.name is not None:
             s += ', name=\'{name}\''
         s += ')'
@@ -139,23 +139,4 @@ def forward(self, inputs):
             outputs = self.combine_fn(outputs, input)
         if self.act:
             outputs = self.act(outputs)
-        return outputs
-
-
-# if __name__ == '__main__':
-#     from tensorlayer.layers import Dense, Input
-#     class CustomModel(Module):
-#         def __init__(self):
-#             super(CustomModel, self).__init__(name="custom")
-#             self.dense1 = Dense(in_channels=20, n_units=50, act=tl.ReLU, name='relu1_1')
-#             self.dense2 = Dense(in_channels=20, n_units=50, act=tl.ReLU, name='relu2_1')
-#             self.concat = Elementwise(combine_fn=tl.ops.minimum, name='minimum', act=tl.ReLU)
-#
-#         def forward(self, inputs):
-#             d1 = self.dense1(inputs)
-#             d2 = self.dense2(inputs)
-#             outputs = self.concat([d1, d2])
-#             return outputs
-#     input = Input(shape=[20, 20])
-#     net = CustomModel()
-#     print(net(input))
+        return outputs
\ No newline at end of file
diff --git a/tensorlayer/layers/noise.py b/tensorlayer/layers/noise.py
index d1a164992..65e12fcaf 100644
--- a/tensorlayer/layers/noise.py
+++ b/tensorlayer/layers/noise.py
@@ -75,13 +75,6 @@ def forward(self, inputs):
         else:
             shapes = tl.get_tensor_shape(inputs)
         noise = tl.ops.random_normal(shape=shapes, mean=self.mean, stddev=self.stddev, seed=self.seed)
         outputs = inputs + noise
         return outputs
-
-
-# if __name__ == '__main__':
-#     from tensorlayer.layers import Dense, Input
-#     net = Input([64, 200], name='input')
-#     net = Dense(in_channels=200, n_units=100, act=tl.ReLU, name='dense')(net)
-#     gaussianlayer = GaussianNoise(name='gaussian')(net)
-#     print(gaussianlayer)
diff --git a/tensorlayer/layers/normalization.py b/tensorlayer/layers/normalization.py
index 113a83d67..5ab2e895b 100644
--- a/tensorlayer/layers/normalization.py
+++ b/tensorlayer/layers/normalization.py
@@ -96,16 +96,17 @@ def __init__(
 
         self.axes = None
 
-        if self.num_features is None:
-            raise AttributeError(
-                "The registered layer `{}` should be built in advance. "
-                "Do you forget to pass the keyword argument 'num_feature'? "
-            )
+        # if self.num_features is None:
+        #     raise AttributeError(
+        #         "The registered layer `{}` should be built in advance. "
+        #         "Do you forget to pass the keyword argument 'num_feature'? 
" + # ) if self.num_features: self.build(None) self._built = True + if self.decay < 0.0 or 1.0 < self.decay: raise ValueError("decay should be between 0 to 1") @@ -170,6 +171,18 @@ def build(self, inputs_shape): self.act_init_flag = True def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + + if not self.is_train: + self.batchnorm = tl.ops.BatchNorm( + decay=self.decay, epsilon=self.epsilon, beta=self.beta, gamma=self.gamma, moving_mean=self.moving_mean, + moving_var=self.moving_var, num_features=self.num_features, data_format=self.data_format, + is_train=False + ) outputs = self.batchnorm(inputs=inputs) if self.act_init_flag: outputs = self.act(outputs) diff --git a/tensorlayer/layers/padding.py b/tensorlayer/layers/padding.py index 138f81a42..5a21b5047 100644 --- a/tensorlayer/layers/padding.py +++ b/tensorlayer/layers/padding.py @@ -47,7 +47,7 @@ def __init__( self.padding = padding self.mode = mode - logging.info("PadLayer %s: padding: %s mode: %s" % (self.name, list(self.padding), self.mode)) + logging.info("PadLayer %s: padding: %s mode: %s" % (self.name, self.padding, self.mode)) if self.padding is None: raise Exception( @@ -65,10 +65,10 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape=None): - pass + self.pad = tl.ops.Pad(paddings=self.padding, mode=self.mode) def forward(self, inputs): - outputs = tl.ops.pad(tensor=inputs, paddings=self.padding, mode=self.mode) + outputs = self.pad(inputs) return outputs @@ -131,7 +131,7 @@ class ZeroPad2d(Module): Parameters ---------- - padding : int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. + padding : tuple of 2 ints or int, or tuple of 2 tuples of 2 ints. - If int, the same symmetric padding is applied to width and height. - If tuple of 2 ints, interpreted as two different symmetric padding values for height and width as ``(symmetric_height_pad, symmetric_width_pad)``. - If tuple of 2 tuples of 2 ints, interpreted as ``((top_pad, bottom_pad), (left_pad, right_pad))``. diff --git a/tensorlayer/layers/pooling.py b/tensorlayer/layers/pooling.py index 537b64c0b..51cc9a7ce 100644 --- a/tensorlayer/layers/pooling.py +++ b/tensorlayer/layers/pooling.py @@ -5,7 +5,6 @@ from tensorlayer import logging from tensorlayer.layers.core import Module -# TODO ADD INPUT CHECK __all__ = [ 'PoolLayer', @@ -21,7 +20,7 @@ 'GlobalMeanPool2d', 'GlobalMaxPool3d', 'GlobalMeanPool3d', - # 'CornerPool2d', + 'CornerPool2d', ] @@ -902,73 +901,79 @@ def forward(self, inputs): return outputs -# class CornerPool2d(Layer): -# """Corner pooling for 2D image [batch, height, width, channel], see `here `__. -# -# Parameters -# ---------- -# mode : str -# TopLeft for the top left corner, -# Bottomright for the bottom right corner. -# name : None or str -# A unique layer name. 
-#
-#     Examples
-#     ---------
-#     With TensorLayer
-#
-#     >>> net = tl.layers.Input([None, 32, 32, 8], name='input')
-#     >>> net = tl.layers.CornerPool2d(mode='TopLeft',name='cornerpool2d')(net)
-#     >>> output shape : [None, 32, 32, 8]
-#
-#     """
-#
-#     def __init__(
-#         self,
-#         mode='TopLeft',
-#         name=None  # 'cornerpool2d'
-#     ):
-#         super().__init__(name)
-#         self.mode = mode
-#         self.build()
-#         self._built = True
-#
-#         logging.info("CornerPool2d %s : mode: %s" % (self.name, str(mode)))
-#
-#     def __repr__(self):
-#         s = ('{classname}(mode={mode}')
-#         if self.name is not None:
-#             s += ', name=\'{name}\''
-#         s += ')'
-#         return s.format(classname=self.__class__.__name__, **self.__dict__)
-#
-#     def build(self, inputs_shape=None):
-#         pass
-#
-#     def forward(self, inputs):
-#         input_width = inputs.shape[2]
-#         input_height = inputs.shape[1]
-#         batch_min = tl.reduce_min(inputs)
-#         if self.mode == 'TopLeft':
-#             temp_bottom = tl.pad(
-#                 inputs, tl.constant([[0, 0], [0, input_height - 1], [0, 0], [0, 0]]), constant_values=batch_min
-#             )
-#             temp_right = tl.pad(
-#                 inputs, tl.constant([[0, 0], [0, 0], [0, input_width - 1], [0, 0]]), constant_values=batch_min
-#             )
-#             temp_bottom = tl.ops.max_pool(temp_bottom, ksize=(input_height, 1), strides=(1, 1), padding='VALID')
-#             temp_right = tl.ops.max_pool(temp_right, ksize=(1, input_width), strides=(1, 1), padding='VALID')
-#             outputs = tl.add(temp_bottom, temp_right)#, name=self.name)
-#         elif self.mode == 'BottomRight':
-#             temp_top = tl.pad(
-#                 inputs, tl.constant([[0, 0], [input_height - 1, 0], [0, 0], [0, 0]]), constant_values=batch_min
-#             )
-#             temp_left = tl.pad(
-#                 inputs, tl.constant([[0, 0], [0, 0], [input_width - 1, 0], [0, 0]]), constant_values=batch_min
-#             )
-#             temp_top = tl.ops.max_pool(temp_top, ksize=(input_height, 1), strides=(1, 1), padding='VALID')
-#             temp_left = tl.ops.max_pool(temp_left, ksize=(1, input_width), strides=(1, 1), padding='VALID')
-#             outputs = tl.add(temp_top, temp_left, name=self.name)
-#         else:
-#             outputs = tl.identity(inputs, name=self.name)
-#         return outputs
+class CornerPool2d(Module):
+    """Corner pooling for 2D image [batch, height, width, channel], see `here `__.
+
+    Parameters
+    ----------
+    mode : str
+        TopLeft for the top left corner,
+        BottomRight for the bottom right corner.
+    name : None or str
+        A unique layer name.
+
+    Examples
+    ---------
+    With TensorLayer
+
+    >>> net = tl.layers.Input([None, 32, 32, 8], name='input')
+    >>> net = tl.layers.CornerPool2d(mode='TopLeft',name='cornerpool2d')(net)
+    >>> output shape : [None, 32, 32, 8]
+
+    """
+
+    def __init__(
+        self,
+        mode='TopLeft',
+        name=None  # 'cornerpool2d'
+    ):
+        super().__init__(name)
+        self.mode = mode
+        self.build()
+        self._built = True
+
+        logging.info("CornerPool2d %s : mode: %s" % (self.name, str(mode)))
+
+    def __repr__(self):
+        s = ('{classname}(mode={mode}')
+        if self.name is not None:
+            s += ', name=\'{name}\''
+        s += ')'
+        return s.format(classname=self.__class__.__name__, **self.__dict__)
+
+    def build(self, inputs_shape=None):
+        pass
+
+    def forward(self, inputs):
+        _, input_height, input_width, _ = tl.get_tensor_shape(inputs)
+        # input_width = inputs.shape[2]
+        # input_height = inputs.shape[1]
+        batch_min = tl.reduce_min(inputs)
+        if self.mode == 'TopLeft':
+            temp_bottom = tl.pad(
+                inputs, tl.constant([[0, 0], [0, input_height - 1], [0, 0], [0, 0]]), constant_values=batch_min
+            )
+            temp_right = tl.pad(
+                inputs, tl.constant([[0, 0], [0, 0], [0, input_width - 1], [0, 0]]), constant_values=batch_min
+            )
+            temp_bottom = tl.ops.max_pool(temp_bottom, ksize=(input_height, 1), strides=(1, 1), padding='VALID')
+            temp_right = tl.ops.max_pool(temp_right, ksize=(1, input_width), strides=(1, 1), padding='VALID')
+            outputs = tl.add(temp_bottom, temp_right)  #, name=self.name)
+        elif self.mode == 'BottomRight':
+            temp_top = tl.pad(
+                inputs, tl.constant([[0, 0], [input_height - 1, 0], [0, 0], [0, 0]]), constant_values=batch_min
+            )
+            temp_left = tl.pad(
+                inputs, tl.constant([[0, 0], [0, 0], [input_width - 1, 0], [0, 0]]), constant_values=batch_min
+            )
+            temp_top = tl.ops.max_pool(temp_top, ksize=(input_height, 1), strides=(1, 1), padding='VALID')
+            temp_left = tl.ops.max_pool(temp_left, ksize=(1, input_width), strides=(1, 1), padding='VALID')
+            outputs = tl.add(temp_top, temp_left)
+        else:
+            outputs = tl.identity(inputs)
+        return outputs
diff --git a/tensorlayer/layers/recurrent.py b/tensorlayer/layers/recurrent.py
new file mode 100644
index 000000000..edeec330c
--- /dev/null
+++ b/tensorlayer/layers/recurrent.py
@@ -0,0 +1,1264 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import numpy as np
+
+import tensorflow as tf
+
+import tensorlayer as tl
+from tensorlayer import logging
+from tensorlayer.layers.core import Module
+
+# TODO: uncomment
+__all__ = [
+    'RNN',
+    'SimpleRNN',
+    'GRURNN',
+    'LSTMRNN',
+    'BiRNN',
+    # 'ConvRNNCell',
+    # 'BasicConvLSTMCell',
+    # 'ConvLSTM',
+    'retrieve_seq_length_op',
+    'retrieve_seq_length_op2',
+    'retrieve_seq_length_op3',
+    'target_mask_op',
+]
+
+
+class RNN(Module):
+    """
+    The :class:`RNN` class is a fixed length recurrent layer for implementing simple RNN,
+    LSTM, GRU, etc.
+
+    Parameters
+    ----------
+    cell : TensorFlow cell function
+        An RNN cell implemented by tf.keras
+        - E.g. tf.keras.layers.SimpleRNNCell, tf.keras.layers.LSTMCell, tf.keras.layers.GRUCell
+        - Note TF2.0+, TF1.0+ and TF1.0- are different
+
+    return_last_output : boolean
+        Whether to return the last output or all outputs in a sequence.
+
+        - If True, return the last output, "Sequence input and single output"
+        - If False, return all outputs, "Synced sequence input and output"
+        - In other words, if you want to stack more RNNs on this layer, set to False
+
+        In a dynamic model, `return_last_output` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_seq_2d : boolean
+        Only consider this argument when `return_last_output` is `False`
+
+        - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it.
+        - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it.
+
+        In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_last_state: boolean
+        Whether to return the last state of the RNN cell. The state is a list of Tensor.
+        For simple RNN and GRU, last_state = [last_output]; For LSTM, last_state = [last_output, last_cell_state]
+
+        - If True, the layer will return outputs and the final state of the cell.
+        - If False, the layer will return outputs only.
+
+        In a dynamic model, `return_last_state` can be updated when it is called in customised forward().
+        By default, `False`.
+    in_channels: int
+        Optional, the number of channels of the previous layer which is normally the size of embedding.
+        If given, the layer will be built when init.
+        If None, it will be automatically detected when the layer is forwarded for the first time.
+    name : str
+        A unique layer name.
+
+    Examples
+    --------
+    For synced sequence input and output, see `PTB example `__
+
+    A simple regression model below.
+
+    >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
+    >>> rnn_out, lstm_state = tl.layers.RNN(
+    >>>     cell=tf.keras.layers.LSTMCell(units=hidden_size, dropout=0.1),
+    >>>     in_channels=embedding_size,
+    >>>     return_last_output=True, return_last_state=True, name='lstmrnn'
+    >>> )(inputs)
+    >>> outputs = tl.layers.Dense(n_units=1)(rnn_out)
+    >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, lstm_state[0], lstm_state[1]], name='rnn_model')
+    >>> # If LSTMCell is applied, the lstm_state is [h, c] where h is the hidden state and c the cell state of LSTM.
+
+    A stacked RNN model.
+
+    >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
+    >>> rnn_out1 = tl.layers.RNN(
+    >>>     cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1),
+    >>>     return_last_output=False, return_seq_2d=False, return_last_state=False
+    >>> )(inputs)
+    >>> rnn_out2 = tl.layers.RNN(
+    >>>     cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1),
+    >>>     return_last_output=True, return_last_state=False
+    >>> )(rnn_out1)
+    >>> outputs = tl.layers.Dense(n_units=1)(rnn_out2)
+    >>> rnn_model = tl.models.Model(inputs=inputs, outputs=outputs)
+
+    An example where the sequences have different lengths and contain padding.
+    Similar to the DynamicRNN in TL 1.x.
+
+    If the `sequence_length` is provided in RNN's forwarding and both `return_last_output` and `return_last_state`
+    are set as `True`, the forward function will automatically ignore the paddings. Note that if `return_last_output`
+    is set as `False`, the synced sequence outputs will still include outputs which correspond with paddings,
+    but users are free to select which slice of outputs to be used in following procedure.
+
+    The `sequence_length` should be a list of integers which indicates the length of each sequence.
+    It is recommended to use
+    `tl.layers.retrieve_seq_length_op3 `__
+    to calculate the `sequence_length`.
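+
+    For instance, for the toy batch below, ``tl.layers.retrieve_seq_length_op3`` would
+    return ``[2, 3, 5]`` (assuming, as its usage here suggests, that all-zero feature
+    vectors mark the padding).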
+
+    >>> data = [[[1], [2], [0], [0], [0]], [[1], [2], [3], [0], [0]], [[1], [2], [6], [1], [1]]]
+    >>> data = tf.convert_to_tensor(data, dtype=tf.float32)
+    >>> class DynamicRNNExample(tl.models.Model):
+    >>>     def __init__(self):
+    >>>         super(DynamicRNNExample, self).__init__()
+    >>>         self.rnnlayer = tl.layers.RNN(
+    >>>             cell=tf.keras.layers.SimpleRNNCell(units=6, dropout=0.1), in_channels=1, return_last_output=True,
+    >>>             return_last_state=True
+    >>>         )
+    >>>     def forward(self, x):
+    >>>         z, s = self.rnnlayer(x, sequence_length=tl.layers.retrieve_seq_length_op3(x))
+    >>>         return z, s
+    >>> model = DynamicRNNExample()
+    >>> model.eval()
+    >>> output, state = model(data)
+
+
+    Notes
+    -----
+    Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see layer :class:`Reshape`.
+
+    """
+
+    def __init__(
+        self,
+        cell,
+        return_last_output=False,
+        return_seq_2d=False,
+        return_last_state=True,
+        in_channels=None,
+        name=None,  # 'rnn'
+    ):
+
+        super(RNN, self).__init__(name=name)
+
+        self.cell = cell
+        self.return_last_output = return_last_output
+        self.return_seq_2d = return_seq_2d
+        self.return_last_state = return_last_state
+
+        if in_channels is not None:
+            self.build((None, None, in_channels))
+            self._built = True
+
+        logging.info("RNN %s: cell: %s, n_units: %s" % (self.name, self.cell.__class__.__name__, self.cell.units))
+
+    def __repr__(self):
+        s = ('{classname}(cell={cellname}, n_units={n_units}')
+        s += ', name=\'{name}\''
+        s += ')'
+        return s.format(
+            classname=self.__class__.__name__, cellname=self.cell.__class__.__name__, n_units=self.cell.units,
+            **self.__dict__
+        )
+
+    def build(self, inputs_shape):
+        """
+        Parameters
+        ----------
+        inputs_shape : tuple
+            the shape of inputs tensor
+        """
+        # Input dimension should be rank 3 [batch_size, n_steps(max), n_features]
+        if len(inputs_shape) != 3:
+            raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]")
+
+        with tf.name_scope(self.name) as scope:
+            self.cell.build(tuple(inputs_shape))
+
+        if self._trainable_weights is None:
+            self._trainable_weights = list()
+        for var in self.cell.trainable_variables:
+            self._trainable_weights.append(var)
+
+    # @tf.function
+    def forward(self, inputs, sequence_length=None, initial_state=None, **kwargs):
+        """
+        Parameters
+        ----------
+        inputs : input tensor
+            The input of a network
+        sequence_length: None or list of integers
+            The actual length of each sequence in the batch without padding.
+            If provided, when `return_last_output` and `return_last_state` are `True`,
+            the RNN will perform in the manner of a dynamic RNN, i.e.
+            the RNN will return the actual last output / state without padding.
+        initial_state : None or list of Tensor (RNN State)
+            If None, `initial_state` is zero state.
+
+        **kwargs: dict
+            Some attributes can be updated during forwarding
+            such as `return_last_output`, `return_seq_2d`, `return_last_state`.
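+
+        Examples
+        --------
+        A sketch of overriding one of these attributes for a single call (the
+        layer and tensor names here are illustrative):
+
+        >>> out = rnnlayer(x, return_last_output=True, return_last_state=False)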
+ """ + if kwargs: + for attr in kwargs: + if attr in self.__dict__: + setattr(self, attr, kwargs[attr]) + + batch_size = inputs.get_shape().as_list()[0] + total_steps = inputs.get_shape().as_list()[1] + + # checking the type and values of sequence_length + if sequence_length is not None: + if isinstance(sequence_length, list): + pass + elif isinstance(sequence_length, tf.Tensor): + pass + elif isinstance(sequence_length, np.ndarray): + sequence_length = sequence_length.tolist() + else: + raise TypeError( + "The argument sequence_length should be either None or a list of integers. " + "Type got %s" % type(sequence_length) + ) + if (len(sequence_length) != batch_size): + raise ValueError( + "The argument sequence_length should contain %d " % batch_size + + "elements indicating the initial length of each sequence, but got only %d. " % len(sequence_length) + ) + for i in sequence_length: + if not (type(i) is int or (isinstance(i, tf.Tensor) and i.dtype.is_integer)): + raise TypeError( + "The argument sequence_length should be either None or a list of integers. " + "One element of sequence_length has the type %s" % type(i) + ) + if i > total_steps: + raise ValueError( + "The actual length of a sequence should not be longer than " + "that of the longest sequence (total steps) in this mini-batch. " + "Total steps of this mini-batch %d, " % total_steps + + "but got an actual length of a sequence %d" % i + ) + + sequence_length = tl.layers.retrieve_seq_length_op3(inputs) + + sequence_length = [i - 1 if i >= 1 else 0 for i in sequence_length] + + # set warning + # if (not self.return_last_output) and sequence_length is not None: + # warnings.warn( + # 'return_last_output is set as %s ' % self.return_last_output + + # 'When sequence_length is provided, it is recommended to set as True. ' + + # 'Otherwise, padding will be considered while RNN is forwarding.' + # ) + + # return the last output, iterating each seq including padding ones. No need to store output during each + # time step. + if self.return_last_output and sequence_length is None: + outputs = [-1] + else: + outputs = list() + + # initialize the states if provided + states = initial_state if initial_state is not None else self.cell.get_initial_state(inputs) + if not isinstance(states, list): + states = [states] + + stored_states = list() + + # initialize the cell + self.cell.reset_dropout_mask() + self.cell.reset_recurrent_dropout_mask() + + # recurrent computation + # FIXME: if sequence_length is provided (dynamic rnn), only iterate max(sequence_length) times. 
+        for time_step in range(total_steps):
+
+            cell_output, states = self.cell.call(inputs[:, time_step, :], states, training=self.is_train)
+            stored_states.append(states)
+
+            if self.return_last_output and sequence_length is None:
+                outputs[-1] = cell_output
+            else:
+                outputs.append(cell_output)
+
+        # prepare to return results
+        if self.return_last_output and sequence_length is None:
+            outputs = outputs[-1]
+
+        elif self.return_last_output and sequence_length is not None:
+            outputs = tf.convert_to_tensor(outputs)
+            outputs = tf.gather(outputs, sequence_length, axis=0)
+
+            outputs_without_padding = []
+            for i in range(batch_size):
+                outputs_without_padding.append(outputs[i][i][:])
+            outputs = tf.convert_to_tensor(outputs_without_padding)
+        else:
+            if self.return_seq_2d:
+                # PTB tutorial: stack dense layer after that, or compute the cost from the output
+                # 2D Tensor [batch_size * n_steps, n_hidden]
+                outputs = tf.reshape(tf.concat(outputs, 1), [-1, self.cell.units])
+            else:
+                # stack more RNN layers after that
+                # 3D Tensor [batch_size, n_steps, n_hidden]
+                outputs = tf.reshape(tf.concat(outputs, 1), [-1, total_steps, self.cell.units])
+
+        if self.return_last_state and sequence_length is None:
+            return outputs, states
+        elif self.return_last_state and sequence_length is not None:
+
+            stored_states = tf.convert_to_tensor(stored_states)
+            stored_states = tf.gather(stored_states, sequence_length, axis=0)
+
+            states = []
+            for i in range(stored_states.shape[1]):
+                states.append(tf.convert_to_tensor([stored_states[b, i, b, :] for b in range(batch_size)]))
+
+            return outputs, states
+        else:
+            return outputs
+
+
+class SimpleRNN(RNN):
+    """
+    The :class:`SimpleRNN` class is a fixed length recurrent layer for implementing simple RNN.
+
+    Parameters
+    ----------
+    units: int
+        Positive integer, the dimension of hidden space.
+    return_last_output : boolean
+        Whether to return the last output or all outputs in a sequence.
+        - If True, return the last output, "Sequence input and single output"
+        - If False, return all outputs, "Synced sequence input and output"
+        - In other words, if you want to stack more RNNs on this layer, set to False
+
+        In a dynamic model, `return_last_output` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_seq_2d : boolean
+        Only consider this argument when `return_last_output` is `False`
+        - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it.
+        - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it.
+
+        In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_last_state: boolean
+        Whether to return the last state of the RNN cell. The state is a list of Tensor.
+        For simple RNN, last_state = [last_output]
+
+        - If True, the layer will return outputs and the final state of the cell.
+        - If False, the layer will return outputs only.
+
+        In a dynamic model, `return_last_state` can be updated when it is called in customised forward().
+        By default, `False`.
+    in_channels: int
+        Optional, the number of channels of the previous layer which is normally the size of embedding.
+        If given, the layer will be built when init.
+        If None, it will be automatically detected when the layer is forwarded for the first time.
+    name : str
+        A unique layer name.
+    `**kwargs`:
+        Advanced arguments to configure the simple RNN cell.
+        Please check tf.keras.layers.SimpleRNNCell.
+
+    Examples
+    --------
+
+    A simple regression model below.
+
+    >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
+    >>> rnn_out, rnn_state = tl.layers.SimpleRNN(
+    >>>     units=hidden_size, dropout=0.1,  # both units and dropout are used to configure the simple rnn cell.
+    >>>     in_channels=embedding_size,
+    >>>     return_last_output=True, return_last_state=True, name='simplernn'
+    >>> )(inputs)
+    >>> outputs = tl.layers.Dense(n_units=1)(rnn_out)
+    >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_state[0]], name='rnn_model')
+
+    Notes
+    -----
+    Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see layer :class:`Reshape`.
+
+    """
+
+    def __init__(
+        self,
+        units,
+        return_last_output=False,
+        return_seq_2d=False,
+        return_last_state=True,
+        in_channels=None,
+        name=None,  # 'simplernn'
+        **kwargs
+    ):
+        super(SimpleRNN, self).__init__(
+            cell=tf.keras.layers.SimpleRNNCell(units=units, **kwargs), return_last_output=return_last_output,
+            return_seq_2d=return_seq_2d, return_last_state=return_last_state, in_channels=in_channels, name=name
+        )
+
+
+class GRURNN(RNN):
+    """
+    The :class:`GRURNN` class is a fixed length recurrent layer for implementing RNN with GRU cell.
+
+    Parameters
+    ----------
+    units: int
+        Positive integer, the dimension of hidden space.
+    return_last_output : boolean
+        Whether to return the last output or all outputs in a sequence.
+        - If True, return the last output, "Sequence input and single output"
+        - If False, return all outputs, "Synced sequence input and output"
+        - In other words, if you want to stack more RNNs on this layer, set to False
+
+        In a dynamic model, `return_last_output` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_seq_2d : boolean
+        Only consider this argument when `return_last_output` is `False`
+        - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it.
+        - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it.
+
+        In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_last_state: boolean
+        Whether to return the last state of the RNN cell. The state is a list of Tensor.
+        For GRU, last_state = [last_output]
+
+        - If True, the layer will return outputs and the final state of the cell.
+        - If False, the layer will return outputs only.
+
+        In a dynamic model, `return_last_state` can be updated when it is called in customised forward().
+        By default, `False`.
+    in_channels: int
+        Optional, the number of channels of the previous layer which is normally the size of embedding.
+        If given, the layer will be built when init.
+        If None, it will be automatically detected when the layer is forwarded for the first time.
+    name : str
+        A unique layer name.
+    `**kwargs`:
+        Advanced arguments to configure the GRU cell.
+        Please check tf.keras.layers.GRUCell.
+
+    Examples
+    --------
+
+    A simple regression model below.
+
+    >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
+    >>> rnn_out, rnn_state = tl.layers.GRURNN(
+    >>>     units=hidden_size, dropout=0.1,  # both units and dropout are used to configure the GRU cell.
+    >>>     in_channels=embedding_size,
+    >>>     return_last_output=True, return_last_state=True, name='grurnn'
+    >>> )(inputs)
+    >>> outputs = tl.layers.Dense(n_units=1)(rnn_out)
+    >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_state[0]], name='rnn_model')
+
+    Notes
+    -----
+    Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see layer :class:`Reshape`.
+
+    """
+
+    def __init__(
+        self,
+        units,
+        return_last_output=False,
+        return_seq_2d=False,
+        return_last_state=True,
+        in_channels=None,
+        name=None,  # 'grurnn'
+        **kwargs
+    ):
+        super(GRURNN, self).__init__(
+            cell=tf.keras.layers.GRUCell(units=units, **kwargs), return_last_output=return_last_output,
+            return_seq_2d=return_seq_2d, return_last_state=return_last_state, in_channels=in_channels, name=name
+        )
+
+
+class LSTMRNN(RNN):
+    """
+    The :class:`LSTMRNN` class is a fixed length recurrent layer for implementing RNN with LSTM cell.
+
+    Parameters
+    ----------
+    units: int
+        Positive integer, the dimension of hidden space.
+    return_last_output : boolean
+        Whether to return the last output or all outputs in a sequence.
+        - If True, return the last output, "Sequence input and single output"
+        - If False, return all outputs, "Synced sequence input and output"
+        - In other words, if you want to stack more RNNs on this layer, set to False
+
+        In a dynamic model, `return_last_output` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_seq_2d : boolean
+        Only consider this argument when `return_last_output` is `False`
+        - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it.
+        - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it.
+
+        In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_last_state: boolean
+        Whether to return the last state of the RNN cell. The state is a list of Tensor.
+        For LSTM, last_state = [last_output, last_cell_state]
+
+        - If True, the layer will return outputs and the final state of the cell.
+        - If False, the layer will return outputs only.
+
+        In a dynamic model, `return_last_state` can be updated when it is called in customised forward().
+        By default, `False`.
+    in_channels: int
+        Optional, the number of channels of the previous layer which is normally the size of embedding.
+        If given, the layer will be built when init.
+        If None, it will be automatically detected when the layer is forwarded for the first time.
+    name : str
+        A unique layer name.
+    `**kwargs`:
+        Advanced arguments to configure the LSTM cell.
+        Please check tf.keras.layers.LSTMCell.
+
+    Examples
+    --------
+
+    A simple regression model below.
+
+    >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
+    >>> rnn_out, lstm_state = tl.layers.LSTMRNN(
+    >>>     units=hidden_size, dropout=0.1,  # both units and dropout are used to configure the LSTM cell.
+    >>>     in_channels=embedding_size,
+    >>>     return_last_output=True, return_last_state=True, name='lstmrnn'
+    >>> )(inputs)
+    >>> outputs = tl.layers.Dense(n_units=1)(rnn_out)
+    >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, lstm_state[0]], name='rnn_model')
+
+    Notes
+    -----
+    Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see layer :class:`Reshape`.
+ + """ + + def __init__( + self, + units, + return_last_output=False, + return_seq_2d=False, + return_last_state=True, + in_channels=None, + name=None, # 'lstmrnn' + **kwargs + ): + super(LSTMRNN, self).__init__( + cell=tf.keras.layers.LSTMCell(units=units, **kwargs), return_last_output=return_last_output, + return_seq_2d=return_seq_2d, return_last_state=return_last_state, in_channels=in_channels, name=name + ) + + +class BiRNN(Layer): + """ + The :class:`BiRNN` class is a fixed length Bidirectional recurrent layer. + + Parameters + ---------- + fw_cell : TensorFlow cell function for forward direction + A RNN cell implemented by tf.keras, e.g. tf.keras.layers.SimpleRNNCell, tf.keras.layers.LSTMCell, tf.keras.layers.GRUCell. + Note TF2.0+, TF1.0+ and TF1.0- are different + bw_cell: TensorFlow cell function for backward direction similar with `fw_cell` + return_seq_2d : boolean. + If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. + If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. + In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). + By default, `False`. + return_last_state: boolean + Whether to return the last state of the two cells. The state is a list of Tensor. + - If True, the layer will return outputs, the final state of `fw_cell` and the final state of `bw_cell`. + - If False, the layer will return outputs only. + + In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). + By default, `False`. + in_channels: int + Optional, the number of channels of the previous layer which is normally the size of embedding. + If given, the layer will be built when init. + If None, it will be automatically detected when the layer is forwarded for the first time. + name : str + A unique layer name. + + Examples + -------- + A simple regression model below. + + >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) + >>> # the fw_cell and bw_cell can be different + >>> rnnlayer = tl.layers.BiRNN( + >>> fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), + >>> bw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size + 1, dropout=0.1), + >>> return_seq_2d=True, return_last_state=True + >>> ) + >>> # if return_last_state=True, the final state of the two cells will be returned together with the outputs + >>> # if return_last_state=False, only the outputs will be returned + >>> rnn_out, rnn_fw_state, rnn_bw_state = rnnlayer(inputs) + >>> # if the BiRNN is followed by a Dense, return_seq_2d should be True. + >>> # if the BiRNN is followed by other RNN, return_seq_2d can be False. + >>> dense = tl.layers.Dense(n_units=1)(rnn_out) + >>> outputs = tl.layers.Reshape([-1, num_steps])(dense) + >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_out, rnn_fw_state[0], rnn_bw_state[0]]) + + A stacked BiRNN model. 
+
+    >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
+    >>> rnn_out1 = tl.layers.BiRNN(
+    >>>     fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1),
+    >>>     bw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size + 1, dropout=0.1),
+    >>>     return_seq_2d=False, return_last_state=False
+    >>> )(inputs)
+    >>> rnn_out2 = tl.layers.BiRNN(
+    >>>     fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1),
+    >>>     bw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size + 1, dropout=0.1),
+    >>>     return_seq_2d=True, return_last_state=False
+    >>> )(rnn_out1)
+    >>> dense = tl.layers.Dense(n_units=1)(rnn_out2)
+    >>> outputs = tl.layers.Reshape([-1, num_steps])(dense)
+    >>> rnn_model = tl.models.Model(inputs=inputs, outputs=outputs)
+
+    Notes
+    -----
+    Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see layer :class:`Reshape`.
+
+    """
+
+    def __init__(
+            self,
+            fw_cell,
+            bw_cell,
+            return_seq_2d=False,
+            return_last_state=False,
+            in_channels=None,
+            name=None,  # 'birnn'
+    ):
+        super(BiRNN, self).__init__(name)
+
+        self.fw_cell = fw_cell
+        self.bw_cell = bw_cell
+        self.return_seq_2d = return_seq_2d
+        self.return_last_state = return_last_state
+
+        if in_channels is not None:
+            self.build((None, None, in_channels))
+            self._built = True
+
+        logging.info(
+            "BiRNN %s: fw_cell: %s, fw_n_units: %s, bw_cell: %s, bw_n_units: %s" % (
+                self.name, self.fw_cell.__class__.__name__, self.fw_cell.units, self.bw_cell.__class__.__name__,
+                self.bw_cell.units
+            )
+        )
+
+    def __repr__(self):
+        s = (
+            '{classname}(fw_cell={fw_cellname}, fw_n_units={fw_n_units}'
+            ', bw_cell={bw_cellname}, bw_n_units={bw_n_units}'
+        )
+        s += ', name=\'{name}\''
+        s += ')'
+        return s.format(
+            classname=self.__class__.__name__, fw_cellname=self.fw_cell.__class__.__name__,
+            fw_n_units=self.fw_cell.units, bw_cellname=self.bw_cell.__class__.__name__, bw_n_units=self.bw_cell.units,
+            **self.__dict__
+        )
+
+    def build(self, inputs_shape):
+        """
+        Parameters
+        ----------
+        inputs_shape : tuple
+            the shape of inputs tensor
+        """
+        # Input dimension should be rank 3 [batch_size, n_steps(max), n_features]
+        if len(inputs_shape) != 3:
+            raise Exception("BiRNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]")
+
+        with tf.name_scope(self.name) as scope:
+            self.fw_cell.build(tuple(inputs_shape))
+            self.bw_cell.build(tuple(inputs_shape))
+
+        if self._trainable_weights is None:
+            self._trainable_weights = list()
+        for var in self.fw_cell.trainable_variables:
+            self._trainable_weights.append(var)
+        for var in self.bw_cell.trainable_variables:
+            self._trainable_weights.append(var)
+
+    # @tf.function
+    def forward(self, inputs, fw_initial_state=None, bw_initial_state=None, **kwargs):
+        """
+        Parameters
+        ----------
+        inputs : input tensor
+            The input of a network
+        fw_initial_state : None or list of Tensor (RNN State)
+            If None, `fw_initial_state` is zero state.
+        bw_initial_state : None or list of Tensor (RNN State)
+            If None, `bw_initial_state` is zero state.
+        **kwargs: dict
+            Some attributes can be updated during forwarding,
+            such as `return_seq_2d` and `return_last_state`.
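+
+        A minimal sketch of a per-call override (``birnnlayer`` is assumed to be
+        a built :class:`BiRNN` instance as in the class-level examples):
+
+        >>> outputs, fw_state, bw_state = birnnlayer(inputs, return_last_state=True)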
+ """ + + if kwargs: + for attr in kwargs: + if attr in self.__dict__: + setattr(self, attr, kwargs[attr]) + + fw_outputs = list() + bw_outputs = list() + + fw_states = fw_initial_state if fw_initial_state is not None else self.fw_cell.get_initial_state(inputs) + bw_states = bw_initial_state if bw_initial_state is not None else self.bw_cell.get_initial_state(inputs) + + if not isinstance(fw_states, list): + fw_states = [fw_states] + if not isinstance(bw_states, list): + bw_states = [bw_states] + + total_steps = inputs.get_shape().as_list()[1] + + self.fw_cell.reset_dropout_mask() + self.fw_cell.reset_recurrent_dropout_mask() + self.bw_cell.reset_dropout_mask() + self.bw_cell.reset_recurrent_dropout_mask() + + for time_step in range(total_steps): + fw_cell_output, fw_states = self.fw_cell.call(inputs[:, time_step, :], fw_states, training=self.is_train) + bw_cell_output, bw_states = self.bw_cell.call( + inputs[:, -time_step - 1, :], bw_states, training=self.is_train + ) + + fw_outputs.append(fw_cell_output) + bw_outputs.append(bw_cell_output) + + if self.return_seq_2d: + # PTB tutorial: stack dense layer after that, or compute the cost from the output + # 2D Tensor [batch_size * n_steps, n_hidden] + fw_outputs = tf.reshape(tf.concat(fw_outputs, 1), [-1, self.fw_cell.units]) + bw_outputs = tf.reshape(tf.concat(bw_outputs, 1), [-1, self.bw_cell.units]) + else: + # : stack more RNN layer after that + # 3D Tensor [batch_size, n_steps, n_hidden] + fw_outputs = tf.reshape(tf.concat(fw_outputs, 1), [-1, total_steps, self.fw_cell.units]) + bw_outputs = tf.reshape(tf.concat(bw_outputs, 1), [-1, total_steps, self.bw_cell.units]) + + outputs = tf.concat([fw_outputs, bw_outputs], -1) + + if self.return_last_state: + return outputs, fw_states, bw_states + else: + return outputs + + +''' +class ConvRNNCell(object): + """Abstract object representing an Convolutional RNN Cell.""" + + def __call__(self, inputs, state, scope=None): + """Run this RNN cell on inputs, starting from the given state.""" + raise NotImplementedError("Abstract method") + + @property + def state_size(self): + """size(s) of state(s) used by this cell.""" + raise NotImplementedError("Abstract method") + + @property + def output_size(self): + """Integer or TensorShape: size of outputs produced by this cell.""" + raise NotImplementedError("Abstract method") + + def zero_state(self, batch_size): #, dtype=LayersConfig.tf_dtype): + """Return zero-filled state tensor(s). + Args: + batch_size: int, float, or unit Tensor representing the batch size. + Returns: + tensor of shape '[batch_size x shape[0] x shape[1] x num_features] + filled with zeros + + """ + dtype = LayersConfig.tf_dtype + shape = self.shape + num_features = self.num_features + # TODO : TypeError: 'NoneType' object is not subscriptable + zeros = tf.zeros([batch_size, shape[0], shape[1], num_features * 2], dtype=dtype) + return zeros + + +class BasicConvLSTMCell(ConvRNNCell): + """Basic Conv LSTM recurrent network cell. + + Parameters + ----------- + shape : tuple of int + The height and width of the cell. + filter_size : tuple of int + The height and width of the filter + num_features : int + The hidden size of the cell + forget_bias : float + The bias added to forget gates (see above). + input_size : int + Deprecated and unused. + state_is_tuple : boolen + If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`. + If False, they are concatenated along the column axis. The latter behavior will soon be deprecated. 
+    act : activation function
+        The activation function of this layer, tanh as default.
+
+    """
+
+    def __init__(
+            self, shape, filter_size, num_features, forget_bias=1.0, input_size=None, state_is_tuple=False,
+            act=tf.nn.tanh
+    ):
+        """Initialize the basic Conv LSTM cell."""
+        # if not state_is_tuple:
+        #     logging.warn("%s: Using a concatenated state is slower and will soon be "
+        #                  "deprecated. Use state_is_tuple=True.", self)
+        if input_size is not None:
+            logging.warn("%s: The input_size parameter is deprecated.", self)
+        self.shape = shape
+        self.filter_size = filter_size
+        self.num_features = num_features
+        self._forget_bias = forget_bias
+        self._state_is_tuple = state_is_tuple
+        self._activation = act
+
+    @property
+    def state_size(self):
+        """State size of the LSTMStateTuple."""
+        # the hidden size of this cell is stored in `num_features`
+        return (LSTMStateTuple(self.num_features, self.num_features) if self._state_is_tuple else 2 * self.num_features)
+
+    @property
+    def output_size(self):
+        """Number of units in outputs."""
+        return self.num_features
+
+    def __call__(self, inputs, state, scope=None):
+        """Long short-term memory cell (LSTM)."""
+        with tf.compat.v1.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
+            # Parameters of gates are concatenated into one multiply for efficiency.
+            if self._state_is_tuple:
+                c, h = state
+            else:
+                # print state
+                # c, h = tf.split(3, 2, state)
+                c, h = tf.split(state, 2, 3)
+            concat = _conv_linear([inputs, h], self.filter_size, self.num_features * 4, True)
+
+            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
+            # i, j, f, o = tf.split(3, 4, concat)
+            i, j, f, o = tf.split(concat, 4, 3)
+
+            new_c = (c * tf.nn.sigmoid(f + self._forget_bias) + tf.nn.sigmoid(i) * self._activation(j))
+            new_h = self._activation(new_c) * tf.nn.sigmoid(o)
+
+            if self._state_is_tuple:
+                new_state = LSTMStateTuple(new_c, new_h)
+            else:
+                new_state = tf.concat([new_c, new_h], 3)
+            return new_h, new_state
+
+
+def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=None):
+    """Convolution:
+
+    Parameters
+    ----------
+    args : tensor
+        4D Tensor or a list of 4D, batch x n, Tensors.
+    filter_size : tuple of int
+        Filter height and width.
+    num_features : int
+        Number of features.
+    bias : boolean
+        Whether to add a bias term.
+    bias_start : float
+        Starting value to initialize the bias; 0 by default.
+    scope : VariableScope
+        For the created subgraph; defaults to "Linear".
+
+    Returns
+    --------
+    - A 4D Tensor with shape [batch, h, w, num_features]
+
+    Raises
+    -------
+    - ValueError : if some of the arguments have an unspecified or wrong shape.
+
+    """
+    # Calculate the total size of arguments on the channel dimension (axis 3).
+    total_arg_size_depth = 0
+    shapes = [a.get_shape().as_list() for a in args]
+    for shape in shapes:
+        if len(shape) != 4:
+            raise ValueError("Linear is expecting 4D arguments: %s" % str(shapes))
+        if not shape[3]:
+            raise ValueError("Linear expects shape[3] of arguments: %s" % str(shapes))
+        else:
+            total_arg_size_depth += shape[3]
+
+    dtype = [a.dtype for a in args][0]
+
+    # Now the computation.
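+    # A single conv2d over the channel-wise concatenation of `args` produces all
+    # gate pre-activations at once; this is mathematically equivalent to summing
+    # separate convolutions of each argument, but needs only one kernel launch.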
+ with tf.compat.v1.variable_scope(scope or "Conv"): + matrix = tf.compat.v1.get_variable( + "Matrix", [filter_size[0], filter_size[1], total_arg_size_depth, num_features], dtype=dtype + ) + if len(args) == 1: + res = tf.nn.conv2d(args[0], matrix, strides=[1, 1, 1, 1], padding='SAME') + else: + res = tf.nn.conv2d(tf.concat(args, 3), matrix, strides=[1, 1, 1, 1], padding='SAME') + if not bias: + return res + bias_term = tf.compat.v1.get_variable( + "Bias", [num_features], dtype=dtype, + initializer=tf.compat.v1.initializers.constant(bias_start, dtype=dtype) + ) + return res + bias_term + + +class ConvLSTM(Layer): + """A fixed length Convolutional LSTM layer. + + See this `paper `__ . + + Parameters + ---------- + prev_layer : :class:`Layer` + Previous layer + cell_shape : tuple of int + The shape of each cell width * height + filter_size : tuple of int + The size of filter width * height + cell_fn : a convolutional RNN cell + Cell function like :class:`BasicConvLSTMCell` + feature_map : int + The number of feature map in the layer. + initializer : initializer + The initializer for initializing the parameters. + n_steps : int + The sequence length. + initial_state : None or ConvLSTM State + If None, `initial_state` is zero state. + return_last : boolean + Whether return last output or all outputs in each step. + - If True, return the last output, "Sequence input and single output". + - If False, return all outputs, "Synced sequence input and output". + - In other word, if you want to stack more RNNs on this layer, set to False. + + return_seq_2d : boolean + Only consider this argument when `return_last_output` is `False` + - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it. + - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it. + + name : str + A unique layer name. + + Attributes + ---------- + outputs : tensor + The output of this RNN. return_last_output = False, outputs = all cell_output, which is the hidden state. + cell_output.get_shape() = (?, h, w, c]) + + final_state : tensor or StateTuple + The finial state of this layer. + - When state_is_tuple = False, it is the final hidden and cell states, + - When state_is_tuple = True, You can get the final state after each iteration during training, then feed it to the initial state of next iteration. + + initial_state : tensor or StateTuple + It is the initial state of this ConvLSTM layer, you can use it to initialize + your state at the beginning of each epoch or iteration according to your + training procedure. + + batch_size : int or tensor + Is int, if able to compute the batch_size, otherwise, tensor for ``?``. + + """ + + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__( + self, + prev_layer, + cell_shape=None, + feature_map=1, + filter_size=(3, 3), + cell_fn=BasicConvLSTMCell, + initializer=tf.compat.v1.initializers.random_uniform(-0.1, 0.1), + n_steps=5, + initial_state=None, + return_last=False, + return_seq_2d=False, + name='convlstm', + ): + super(ConvLSTM, self).__init__(prev_layer=prev_layer, name=name) + + logging.info( + "ConvLSTM %s: feature_map: %d, n_steps: %d, " + "in_dim: %d %s, cell_fn: %s " % + (self.name, feature_map, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__) + ) + # You can get the dimension by .get_shape() or ._shape, and check the + # dimension by .with_rank() as follow. 
+ # self.inputs.get_shape().with_rank(2) + # self.inputs.get_shape().with_rank(3) + + # Input dimension should be rank 5 [batch_size, n_steps(max), h, w, c] + try: + self.inputs.get_shape().with_rank(5) + except Exception: + raise Exception( + "RNN : Input dimension should be rank 5 : [batch_size, n_steps, input_x, " + "input_y, feature_map]" + ) + + fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] + + if fixed_batch_size.value: + batch_size = fixed_batch_size.value + logging.info(" RNN batch_size (concurrent processes): %d" % batch_size) + + else: + batch_size = array_ops.shape(self.inputs)[0] + logging.info(" non specified batch_size, uses a tensor instead.") + self.batch_size = batch_size + outputs = [] + self.cell = cell = cell_fn(shape=cell_shape, filter_size=filter_size, num_features=feature_map) + + if initial_state is None: + self.initial_state = cell.zero_state(batch_size, dtype=LayersConfig.tf_dtype) + else: + self.initial_state = initial_state + + state = self.initial_state + + # with tf.variable_scope("model", reuse=None, initializer=initializer): + with tf.compat.v1.variable_scope(name, initializer=initializer) as vs: + for time_step in range(n_steps): + if time_step > 0: tf.compat.v1.get_variable_scope().reuse_variables() + (cell_output, state) = cell(self.inputs[:, time_step, :, :, :], state) + outputs.append(cell_output) + + # Retrieve just the RNN variables. + # rnn_variables = [v for v in tf.all_variables() if v.name.startswith(vs.name)] + rnn_variables = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.VARIABLES, scope=vs.name) + + logging.info(" n_params : %d" % (len(rnn_variables))) + + if return_last: + # 2D Tensor [batch_size, n_hidden] + self.outputs = outputs[-1] + else: + if return_seq_2d: + # PTB tutorial: stack dense layer after that, or compute the cost from the output + # 4D Tensor [n_example, h, w, c] + self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, cell_shape[0] * cell_shape[1] * feature_map]) + else: + # : stack more RNN layer after that + # 5D Tensor [n_example/n_steps, n_steps, h, w, c] + self.outputs = tf.reshape( + tf.concat(outputs, 1), [-1, n_steps, cell_shape[0], cell_shape[1], feature_map] + ) + + self.final_state = state + + self._add_layers(self.outputs) + self._add_params(rnn_variables) + +''' + + +# @tf.function +def retrieve_seq_length_op(data): + """An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features], + it can be used when the features of padding (on right hand side) are all zeros. + + Parameters + ----------- + data : tensor + [batch_size, n_step(max), n_features] with zero padding on right hand side. + + Examples + ----------- + Single feature + + >>> data = [[[1],[2],[0],[0],[0]], + >>> [[1],[2],[3],[0],[0]], + >>> [[1],[2],[6],[1],[0]]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> length = tl.layers.retrieve_seq_length_op(data) + [2 3 4] + + Multiple features + + >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]], + >>> [[2,3],[2,4],[3,2],[0,0],[0,0]], + >>> [[3,3],[2,2],[5,3],[1,2],[0,0]]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> length = tl.layers.retrieve_seq_length_op(data) + [4 3 4] + + References + ------------ + Borrow from `TFlearn `__. 
+ + """ + with tf.name_scope('GetLength'): + used = tf.sign(tf.reduce_max(input_tensor=tf.abs(data), axis=2)) + length = tf.reduce_sum(input_tensor=used, axis=1) + + return tf.cast(length, tf.int32) + + +# @tf.function +def retrieve_seq_length_op2(data): + """An op to compute the length of a sequence, from input shape of [batch_size, n_step(max)], + it can be used when the features of padding (on right hand side) are all zeros. + + Parameters + ----------- + data : tensor + [batch_size, n_step(max)] with zero padding on right hand side. + + Examples + ----------- + >>> data = [[1,2,0,0,0], + >>> [1,2,3,0,0], + >>> [1,2,6,1,0]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> length = tl.layers.retrieve_seq_length_op2(data) + tensor([2 3 4]) + + """ + return tf.reduce_sum(input_tensor=tf.cast(tf.greater(data, tf.zeros_like(data)), tf.int32), axis=1) + + +# @tf.function +def retrieve_seq_length_op3(data, pad_val=0): + """An op to compute the length of a sequence, the data shape can be [batch_size, n_step(max)] or + [batch_size, n_step(max), n_features]. + + If the data has type of tf.string and pad_val is assigned as empty string (''), this op will compute the + length of the string sequence. + + Parameters + ----------- + data : tensor + [batch_size, n_step(max)] or [batch_size, n_step(max), n_features] with zero padding on the right hand side. + pad_val: + By default 0. If the data is tf.string, please assign this as empty string ('') + + Examples + ----------- + >>> data = [[[1],[2],[0],[0],[0]], + >>> [[1],[2],[3],[0],[0]], + >>> [[1],[2],[6],[1],[0]]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> length = tl.layers.retrieve_seq_length_op3(data) + tensor([2, 3, 4]) + >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]], + >>> [[2,3],[2,4],[3,2],[0,0],[0,0]], + >>> [[3,3],[2,2],[5,3],[1,2],[0,0]]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> length = tl.layers.retrieve_seq_length_op3(data) + tensor([4, 3, 4]) + >>> data = [[1,2,0,0,0], + >>> [1,2,3,0,0], + >>> [1,2,6,1,0]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> length = tl.layers.retrieve_seq_length_op3(data) + tensor([2, 3, 4]) + >>> data = [['hello','world','','',''], + >>> ['hello','world','tensorlayer','',''], + >>> ['hello','world','tensorlayer','2.0','']] + >>> data = tf.convert_to_tensor(data, dtype=tf.string) + >>> length = tl.layers.retrieve_seq_length_op3(data, pad_val='') + tensor([2, 3, 4]) + + """ + data_shape_size = data.get_shape().ndims + if data_shape_size == 3: + return tf.reduce_sum( + input_tensor=tf.cast(tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2), dtype=tf.int32), + axis=1 + ) + elif data_shape_size == 2: + return tf.reduce_sum(input_tensor=tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), axis=1) + elif data_shape_size == 1: + raise ValueError("retrieve_seq_length_op3: data has wrong shape! Shape got ", data.get_shape().as_list()) + else: + raise ValueError( + "retrieve_seq_length_op3: handling data with num of dims %s hasn't been implemented!" % (data_shape_size) + ) + + +def target_mask_op(data, pad_val=0): + """ Return the mask of the input sequence data based on the padding values. + + Parameters + ----------- + data : tf.Tensor + A tensor with 2 or 3 dimensions. + pad_val: int, float, string, etc + The value that represent padding. By default, 0. For tf.string, you may use empty string. 
+
+    Examples
+    -----------
+    >>> data = [['hello', 'world', '', '', ''],
+    >>>         ['hello', 'world', 'tensorlayer', '', ''],
+    >>>         ['hello', 'world', 'tensorlayer', '2.0', '']]
+    >>> data = tf.convert_to_tensor(data, dtype=tf.string)
+    >>> mask = tl.layers.target_mask_op(data, pad_val='')
+    >>> print(mask)
+    tf.Tensor(
+    [[1 1 0 0 0]
+     [1 1 1 0 0]
+     [1 1 1 1 0]], shape=(3, 5), dtype=int32)
+    >>> data = [[[1], [0], [0], [0], [0]],
+    >>>         [[1], [2], [3], [0], [0]],
+    >>>         [[1], [2], [0], [1], [0]]]
+    >>> data = tf.convert_to_tensor(data, dtype=tf.float32)
+    >>> mask = tl.layers.target_mask_op(data)
+    >>> print(mask)
+    tf.Tensor(
+    [[1 0 0 0 0]
+     [1 1 1 0 0]
+     [1 1 0 1 0]], shape=(3, 5), dtype=int32)
+    >>> data = [[[0,0],[2,2],[1,2],[1,2],[0,0]],
+    >>>         [[2,3],[2,4],[3,2],[1,0],[0,0]],
+    >>>         [[3,3],[0,1],[5,3],[1,2],[0,0]]]
+    >>> data = tf.convert_to_tensor(data, dtype=tf.float32)
+    >>> mask = tl.layers.target_mask_op(data)
+    >>> print(mask)
+    tf.Tensor(
+    [[0 1 1 1 0]
+     [1 1 1 1 0]
+     [1 1 1 1 0]], shape=(3, 5), dtype=int32)
+    """
+
+    if not isinstance(data, tf.Tensor):
+        raise AttributeError("target_mask_op: the type of input data should be tf.Tensor but got %s." % type(data))
+    data_shape_size = data.get_shape().ndims
+    if data_shape_size == 3:
+        return tf.cast(tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2), dtype=tf.int32)
+    elif data_shape_size == 2:
+        return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32)
+    elif data_shape_size == 1:
+        raise ValueError(
+            "target_mask_op: data_shape %s is not supported. "
+            "The shape of data should have 2 or 3 dims." % (data.get_shape())
+        )
+    else:
+        raise ValueError(
+            "target_mask_op: handling data_shape %s hasn't been implemented! "
+            "The shape of data should have 2 or 3 dims" % (data.get_shape())
+        )
diff --git a/tensorlayer/layers/utils.py b/tensorlayer/layers/utils.py
index 2101a008c..b231d1b0e 100644
--- a/tensorlayer/layers/utils.py
+++ b/tensorlayer/layers/utils.py
@@ -427,3 +427,17 @@ def _compute_threshold(x):
     threshold = tf.math.divide(x_sum, tf.cast(tf.size(input=x), tf.float32), name=None)
     threshold = tf.multiply(0.7, threshold, name=None)
     return threshold
+
+
+def mean_var_with_update(update_moving_mean, update_moving_variance, mean, variance):
+    # block on the moving-average update ops before returning the batch statistics
+    with tf.control_dependencies([update_moving_mean, update_moving_variance]):
+        return tf.identity(mean), tf.identity(variance)
+
+def w_fold(w, gama, var, epsilon):
+    # fold the batch-norm scale into the weights: w_fold = gamma * w / sqrt(var + eps)
+    return tf.compat.v1.div(tf.multiply(gama, w), tf.sqrt(var + epsilon))
+
+def bias_fold(beta, gama, mean, var, epsilon):
+    # folded bias: beta - gamma * mean / sqrt(var + eps)
+    return tf.subtract(beta, tf.compat.v1.div(tf.multiply(gama, mean), tf.sqrt(var + epsilon)))
\ No newline at end of file
diff --git a/tensorlayer/optimizers/mindspore_optimizer.py b/tensorlayer/optimizers/mindspore_optimizer.py
index cb0b41107..659a74937 100644
--- a/tensorlayer/optimizers/mindspore_optimizer.py
+++ b/tensorlayer/optimizers/mindspore_optimizer.py
@@ -31,13 +31,13 @@ class Adam(Cell):
 
     def __init__(
         self,
-        lr=0.001,
+        learning_rate=0.001,
         beta_1=0.9,
         beta_2=0.999,
         epsilon=1e-8,
     ):
         self.adam = optimizer.Adam
-        self.learn_rate = lr
+        self.learn_rate = learning_rate
         self.beta_1 = beta_1
         self.beta_2 = beta_2
         self.epsilon = epsilon
@@ -121,7 +121,7 @@ def apply_gradients(self, grads_and_vars, **kwargs):
         optimizer_mom(grads)
 
 
-class Lamb(object):
+class Lamb(Cell):
 
     def __init__(
         self, decay_steps, warmup_steps=0, start_learning_rate=0.1, end_learning_rate=0.0001, power=1.0, beta1=0.9,
diff --git a/tensorlayer/prepro.py b/tensorlayer/prepro.py
index 3ba2f308c..def019971 100644
---
a/tensorlayer/prepro.py +++ b/tensorlayer/prepro.py @@ -21,6 +21,7 @@ from skimage.morphology import binary_erosion as _binary_erosion from skimage.morphology import disk from skimage.morphology import erosion as _erosion +from skimage.transform import resize import tensorlayer as tl from tensorlayer.lazy_imports import LazyImport @@ -1840,11 +1841,13 @@ def imresize(x, size=None, interp='bicubic', mode=None): if x.shape[-1] == 1: # greyscale - x = scipy.misc.imresize(x[:, :, 0], size, interp=interp, mode=mode) + # x = scipy.misc.imresize(x[:, :, 0], size, interp=interp, mode=mode) + x = resize(x[:, :, 0], size) return x[:, :, np.newaxis] else: # rgb, bgr, rgba - return scipy.misc.imresize(x, size, interp=interp, mode=mode) + return resize(x, output_shape=size) + # return scipy.misc.imresize(x, size, interp=interp, mode=mode) # value scale From 1783e1b7c709e5d2508d5fb50f89699385dfe176 Mon Sep 17 00:00:00 2001 From: Eric Lai Date: Mon, 19 Apr 2021 11:37:59 +0800 Subject: [PATCH 05/36] update examples --- .../basic_tutorials/tutorial_LayerList.py | 43 + ...tutorial_cifar10_cnn_dynamic_MS_backend.py | 166 +++ ...utorial_cifar10_cnn_dynamic_TF_backend.py} | 0 ... tutorial_mnist_mlp_dynamic_MS_backend.py} | 0 ... tutorial_mnist_mlp_dynamic_TF_backend.py} | 182 +-- ...ore.py => tutorial_mnist_mlp_mindspore.py} | 8 +- .../tutorial_ms_cifar10_simple.py | 163 --- .../tutorial_nested_usage_of_Layer.py | 211 ++++ examples/model_zoo/__init__.py | 0 examples/model_zoo/common.py | 287 +++++ examples/model_zoo/imagenet_classes.py | 1003 +++++++++++++++++ examples/model_zoo/model/coco.names | 80 ++ examples/model_zoo/model/weights_2.txt | 541 +++++++++ examples/model_zoo/model/weights_3.txt | 541 +++++++++ .../model/yolov4_weights3_config.txt | 541 +++++++++ .../model_zoo/model/yolov4_weights_config.txt | 541 +++++++++ examples/model_zoo/pretrained_resnet50.py | 32 + examples/model_zoo/pretrained_vgg16.py | 29 + examples/model_zoo/pretrained_yolov4.py | 28 + examples/model_zoo/resnet.py | 225 ++++ examples/model_zoo/vgg.py | 347 ++++++ examples/model_zoo/yolo.py | 376 ++++++ 22 files changed, 5086 insertions(+), 258 deletions(-) create mode 100644 examples/basic_tutorials/tutorial_LayerList.py create mode 100644 examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_MS_backend.py rename examples/basic_tutorials/{tutorial_cifar10_cnn_dynamic.py => tutorial_cifar10_cnn_dynamic_TF_backend.py} (100%) rename examples/basic_tutorials/{tutorial_tensorlayer_mindspore.py => tutorial_mnist_mlp_dynamic_MS_backend.py} (100%) rename examples/basic_tutorials/{tutorial_mnist_mlp_dynamic.py => tutorial_mnist_mlp_dynamic_TF_backend.py} (97%) rename examples/basic_tutorials/{tutorial_mindspore.py => tutorial_mnist_mlp_mindspore.py} (97%) delete mode 100644 examples/basic_tutorials/tutorial_ms_cifar10_simple.py create mode 100644 examples/basic_tutorials/tutorial_nested_usage_of_Layer.py create mode 100644 examples/model_zoo/__init__.py create mode 100644 examples/model_zoo/common.py create mode 100644 examples/model_zoo/imagenet_classes.py create mode 100644 examples/model_zoo/model/coco.names create mode 100644 examples/model_zoo/model/weights_2.txt create mode 100644 examples/model_zoo/model/weights_3.txt create mode 100644 examples/model_zoo/model/yolov4_weights3_config.txt create mode 100644 examples/model_zoo/model/yolov4_weights_config.txt create mode 100644 examples/model_zoo/pretrained_resnet50.py create mode 100644 examples/model_zoo/pretrained_vgg16.py create mode 100644 examples/model_zoo/pretrained_yolov4.py 
create mode 100644 examples/model_zoo/resnet.py
 create mode 100644 examples/model_zoo/vgg.py
 create mode 100644 examples/model_zoo/yolo.py

diff --git a/examples/basic_tutorials/tutorial_LayerList.py b/examples/basic_tutorials/tutorial_LayerList.py
new file mode 100644
index 000000000..2b60fecf8
--- /dev/null
+++ b/examples/basic_tutorials/tutorial_LayerList.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+from tensorlayer.layers import LayerList
+from tensorlayer.layers import Dense
+import tensorlayer as tl
+import numpy as np
+
+layer_list = []
+layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=784, name='Dense1'))
+layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=800, name='Dense2'))
+layer_list.append(Dense(n_units=10, act=tl.ReLU, in_channels=800, name='Dense3'))
+MLP = LayerList(layer_list)
+
+X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
+
+def generator_train():
+    inputs = X_train
+    targets = y_train
+    if len(inputs) != len(targets):
+        raise AssertionError("The length of inputs and targets should be equal")
+    for _input, _target in zip(inputs, targets):
+        yield (_input, np.array(_target))
+
+n_epoch = 50
+batch_size = 128
+print_freq = 2
+shuffle_buffer_size = 128
+
+# train_weights = MLP.trainable_weights
+# print(train_weights)
+optimizer = tl.optimizers.Momentum(0.05, 0.9)
+train_ds = tl.dataflow.FromGenerator(
+    generator_train, output_types=(tl.float32, tl.int32), column_names=['data', 'label']
+)
+train_ds = tl.dataflow.Shuffle(train_ds, shuffle_buffer_size)
+train_ds = tl.dataflow.Batch(train_ds, batch_size)
+
+
+model = tl.models.Model(network=MLP, loss_fn=tl.cost.cross_entropy, optimizer=optimizer)
+model.train(n_epoch=n_epoch, train_dataset=train_ds, print_freq=print_freq, print_train_batch=False)
+model.save_weights('./model.npz', format='npz_dict')
+model.load_weights('./model.npz', format='npz_dict')
\ No newline at end of file
diff --git a/examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_MS_backend.py b/examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_MS_backend.py
new file mode 100644
index 000000000..02ab3e847
--- /dev/null
+++ b/examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_MS_backend.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import time
+import numpy as np
+import multiprocessing
+import tensorflow as tf
+from tensorlayer.layers import Module
+import tensorlayer as tl
+from tensorlayer.layers import (Conv2d, Dense, Flatten, MaxPool2d, BatchNorm2d)
+
+from mindspore.nn import Momentum, WithLossCell
+from mindspore import ParameterTuple
+import mindspore.nn as nn
+import mindspore as ms
+from mindspore.ops import composite as C
+import mindspore.ops.operations as P
+
+# enable debug logging
+tl.logging.set_verbosity(tl.logging.DEBUG)
+
+class CNN(Module):
+    def __init__(self):
+        super(CNN, self).__init__()
+        self.conv1 = Conv2d(64, (5, 5), (2, 2), padding='SAME', b_init=None, name='conv1', in_channels=3, act=tl.ReLU, data_format='channels_first')
+        self.bn = BatchNorm2d(num_features=64, act=tl.ReLU, data_format='channels_first')
+        self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1', data_format='channels_first')
+        self.conv2 = Conv2d(128, (5, 5), (2, 2), padding='SAME', act=tl.ReLU, b_init=None, name='conv2', in_channels=64, data_format='channels_first')
+        self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2', data_format='channels_first')
+
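+        # flatten the [N, C, H, W] feature maps to a 2-D tensor for the dense head;
+        # `in_channels` of dense1 below must match the flattened size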
+        self.flatten = Flatten(name='flatten')
+        self.dense1 = Dense(120, act=tl.ReLU, name='dense1relu', in_channels=4608)
+        self.dense2 = Dense(84, act=tl.ReLU, name='dense2relu', in_channels=120)
+        self.dense3 = Dense(10, act=None, name='output', in_channels=84)
+
+    def forward(self, x):
+        z = self.conv1(x)
+        z = self.bn(z)
+        z = self.maxpool1(z)
+        z = self.conv2(z)
+        z = self.maxpool2(z)
+        z = self.flatten(z)
+        z = self.dense1(z)
+        z = self.dense2(z)
+        z = self.dense3(z)
+        return z
+
+# training settings
+batch_size = 128
+n_epoch = 500
+shuffle_buffer_size = 128
+
+
+# prepare cifar10 data
+X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
+def generator_train():
+    inputs = X_train
+    targets = y_train
+    if len(inputs) != len(targets):
+        raise AssertionError("The length of inputs and targets should be equal")
+    for _input, _target in zip(inputs, targets):
+        yield _input, _target
+
+
+def generator_test():
+    inputs = X_test
+    targets = y_test
+    if len(inputs) != len(targets):
+        raise AssertionError("The length of inputs and targets should be equal")
+    for _input, _target in zip(inputs, targets):
+        # yield _input.encode('utf-8'), _target.encode('utf-8')
+        yield _input, _target
+
+def _map_fn_train(img, target):
+    # 1. Randomly crop a [height, width] section of the image.
+    img = tf.image.random_crop(img, [24, 24, 3])
+    # 2. Randomly flip the image horizontally.
+    img = tf.image.random_flip_left_right(img)
+    # 3. Randomly change brightness.
+    img = tf.image.random_brightness(img, max_delta=63)
+    # 4. Randomly change contrast.
+    img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
+    # 5. Subtract off the mean and divide by the variance of the pixels.
+    img = tf.image.per_image_standardization(img)
+    target = tf.reshape(target, ())
+    return img, target
+
+
+class GradWrap(Module):
+    """ GradWrap definition """
+
+    def __init__(self, network):
+        super(GradWrap, self).__init__(auto_prefix=False)
+        self.network = network
+        self.weights = ParameterTuple(filter(lambda x: x.requires_grad, network.get_parameters()))
+
+    def forward(self, x, label):
+        return C.GradOperation(get_by_list=True)(self.network, self.weights)(x, label)
+
+
+# dataset API and augmentation
+train_ds = tf.data.Dataset.from_generator(
+    generator_train, output_types=(tf.float32, tf.int32)
+)  # , output_shapes=((24, 24, 3), (1)))
+train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
+# train_ds = train_ds.repeat(n_epoch)
+train_ds = train_ds.shuffle(shuffle_buffer_size)
+train_ds = train_ds.prefetch(buffer_size=4096)
+train_ds = train_ds.batch(batch_size)
+
+# get the network
+net = CNN()
+train_weights = net.trainable_weights
+# optimizer = Adam(train_weights, learning_rate=0.01)
+optimizer = Momentum(train_weights, 0.01, 0.5)
+criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
+net_with_criterion = WithLossCell(net, criterion)
+train_network = GradWrap(net_with_criterion)
+train_network.set_train()
+# print(train_weights)
+for epoch in range(n_epoch):
+    start_time = time.time()
+    train_network.set_train()
+    train_loss, train_acc, n_iter = 0, 0, 0
+    for X_batch, y_batch in train_ds:
+        X_batch = ms.Tensor(X_batch.numpy(), dtype=ms.float32)
+        y_batch = ms.Tensor(y_batch.numpy(), dtype=ms.int32)
+        # the TF pipeline yields NHWC images; convert to NCHW for the
+        # channels_first network (labels are 1-D and need no conversion)
+        X_batch = tl.nhwc_to_nchw(X_batch)
+        output = net(X_batch)
+        loss_output = criterion(output, y_batch)
+        grads = train_network(X_batch, y_batch)
+        success = optimizer(grads)
+        loss =
loss_output.asnumpy() + train_loss += loss + n_iter += 1 + train_acc += np.mean((P.Equal()(P.Argmax(axis=1)(output), y_batch).asnumpy())) + print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) + print(" train loss: {}".format(train_loss / n_iter)) + print(" train acc: {}".format(train_acc / n_iter)) + print(" loss ", loss) + +# start_time = time.time() + +# train_loss, train_acc, n_iter = 0, 0, 0 +# for X_batch, y_batch in train_ds: +# net.set_train() + +# with tf.GradientTape() as tape: +# # compute outputs +# _logits = net(X_batch) +# # compute loss and update model +# _loss_ce = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') + +# grad = tape.gradient(_loss_ce, train_weights) +# optimizer.apply_gradients(zip(grad, train_weights)) + +# train_loss += _loss_ce +# train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) +# n_iter += 1 + +# print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) +# print(" train loss: {}".format(train_loss / n_iter)) +# print(" train acc: {}".format(train_acc / n_iter)) diff --git a/examples/basic_tutorials/tutorial_cifar10_cnn_dynamic.py b/examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_TF_backend.py similarity index 100% rename from examples/basic_tutorials/tutorial_cifar10_cnn_dynamic.py rename to examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_TF_backend.py diff --git a/examples/basic_tutorials/tutorial_tensorlayer_mindspore.py b/examples/basic_tutorials/tutorial_mnist_mlp_dynamic_MS_backend.py similarity index 100% rename from examples/basic_tutorials/tutorial_tensorlayer_mindspore.py rename to examples/basic_tutorials/tutorial_mnist_mlp_dynamic_MS_backend.py diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_dynamic.py b/examples/basic_tutorials/tutorial_mnist_mlp_dynamic_TF_backend.py similarity index 97% rename from examples/basic_tutorials/tutorial_mnist_mlp_dynamic.py rename to examples/basic_tutorials/tutorial_mnist_mlp_dynamic_TF_backend.py index 50186c009..128739182 100644 --- a/examples/basic_tutorials/tutorial_mnist_mlp_dynamic.py +++ b/examples/basic_tutorials/tutorial_mnist_mlp_dynamic_TF_backend.py @@ -1,91 +1,91 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import numpy as np -import time - -import tensorflow as tf -import tensorlayer as tl -from tensorlayer.layers import Module -from tensorlayer.layers import Dense, Dropout, BatchNorm1d - -X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) - - -class CustomModel(Module): - - def __init__(self): - super(CustomModel, self).__init__() - self.dropout1 = Dropout(keep=0.8) - self.dense1 = Dense(n_units=800, in_channels=784) - self.batchnorm = BatchNorm1d(act=tl.ReLU, num_features=800) - self.dropout2 = Dropout(keep=0.8) - self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800) - self.dropout3 = Dropout(keep=0.8) - self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800) - - def forward(self, x, foo=None): - z = self.dropout1(x) - z = self.dense1(z) - z = self.batchnorm(z) - z = self.dropout2(z) - z = self.dense2(z) - z = self.dropout3(z) - out = self.dense3(z) - if foo is not None: - out = tl.ops.relu(out) - return out - - -MLP = CustomModel() -n_epoch = 50 -batch_size = 500 -print_freq = 5 -train_weights = MLP.trainable_weights -optimizer = tl.optimizers.Adam(lr=0.0001) - -for epoch in range(n_epoch): ## iterate the dataset n_epoch times - start_time = time.time() - ## iterate over the entire training set once (shuffle the data via 
training) - for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True): - MLP.set_train() # enable dropout - with tf.GradientTape() as tape: - ## compute outputs - _logits = MLP(X_batch) - ## compute loss and update model - _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') - grad = tape.gradient(_loss, train_weights) - optimizer.apply_gradients(zip(grad, train_weights)) - - ## use training and evaluation sets to evaluate the model every print_freq epoch - if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - MLP.set_train() - print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) - train_loss, train_acc, n_iter = 0, 0, 0 - for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False): - _logits = MLP(X_batch) - train_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') - train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_iter += 1 - print(" train loss: {}".format(train_loss / n_iter)) - print(" train acc: {}".format(train_acc / n_iter)) - - val_loss, val_acc, n_iter = 0, 0, 0 - for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False): - _logits = MLP(X_batch) # is_train=False, disable dropout - val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') - val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_iter += 1 - print(" val loss: {}".format(val_loss / n_iter)) - print(" val acc: {}".format(val_acc / n_iter)) - -## use testing data to evaluate the model -MLP.eval() -test_loss, test_acc, n_iter = 0, 0, 0 -for X_batch, y_batch in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False): - _logits = MLP(X_batch, foo=1) - test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') - test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) - n_iter += 1 -print(" test foo=1 loss: {}".format(val_loss / n_iter)) -print(" test foo=1 acc: {}".format(val_acc / n_iter)) +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import numpy as np +import time + +import tensorflow as tf +import tensorlayer as tl +from tensorlayer.layers import Module +from tensorlayer.layers import Dense, Dropout, BatchNorm1d + +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) + + +class CustomModel(Module): + + def __init__(self): + super(CustomModel, self).__init__() + self.dropout1 = Dropout(keep=0.8) + self.dense1 = Dense(n_units=800, in_channels=784) + self.batchnorm = BatchNorm1d(act=tl.ReLU, num_features=800) + self.dropout2 = Dropout(keep=0.8) + self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800) + self.dropout3 = Dropout(keep=0.8) + self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800) + + def forward(self, x, foo=None): + z = self.dropout1(x) + z = self.dense1(z) + z = self.batchnorm(z) + z = self.dropout2(z) + z = self.dense2(z) + z = self.dropout3(z) + out = self.dense3(z) + if foo is not None: + out = tl.ops.relu(out) + return out + + +MLP = CustomModel() +n_epoch = 50 +batch_size = 500 +print_freq = 5 +train_weights = MLP.trainable_weights +optimizer = tl.optimizers.Adam(lr=0.0001) + +for epoch in range(n_epoch): ## iterate the dataset n_epoch times + start_time = time.time() + ## iterate over the entire training set once (shuffle the data via training) + for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True): + MLP.set_train() # enable dropout + with tf.GradientTape() as tape: + ## compute 
outputs
+            _logits = MLP(X_batch)
+            ## compute loss and update model
+            _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss')
+        grad = tape.gradient(_loss, train_weights)
+        optimizer.apply_gradients(zip(grad, train_weights))
+
+    ## use training and evaluation sets to evaluate the model every print_freq epoch
+    if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
+        MLP.set_train()
+        print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
+        train_loss, train_acc, n_iter = 0, 0, 0
+        for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False):
+            _logits = MLP(X_batch)
+            train_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
+            train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
+            n_iter += 1
+        print("   train loss: {}".format(train_loss / n_iter))
+        print("   train acc: {}".format(train_acc / n_iter))
+
+        val_loss, val_acc, n_iter = 0, 0, 0
+        for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False):
+            _logits = MLP(X_batch)  # is_train=False, disable dropout
+            val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
+            val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
+            n_iter += 1
+        print("   val loss: {}".format(val_loss / n_iter))
+        print("   val acc: {}".format(val_acc / n_iter))
+
+## use testing data to evaluate the model
+MLP.eval()
+test_loss, test_acc, n_iter = 0, 0, 0
+for X_batch, y_batch in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False):
+    _logits = MLP(X_batch, foo=1)
+    test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss')
+    test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
+    n_iter += 1
+print("   test foo=1 loss: {}".format(test_loss / n_iter))
+print("   test foo=1 acc: {}".format(test_acc / n_iter))
diff --git a/examples/basic_tutorials/tutorial_mindspore.py b/examples/basic_tutorials/tutorial_mnist_mlp_mindspore.py
similarity index 97%
rename from examples/basic_tutorials/tutorial_mindspore.py
rename to examples/basic_tutorials/tutorial_mnist_mlp_mindspore.py
index 30facdbcb..3e552d3eb 100644
--- a/examples/basic_tutorials/tutorial_mindspore.py
+++ b/examples/basic_tutorials/tutorial_mnist_mlp_mindspore.py
@@ -29,7 +29,7 @@ def weight_variable():
     return TruncatedNormal(0.02)
 
 
-class LeNet5(nn.Cell):
+class MLP(nn.Cell):
     """
     Lenet network
     Args:
@@ -39,11 +39,11 @@ class LeNet5(nn.Cell):
         Tensor, output tensor
     Examples:
-        >>> LeNet(num_class=10)
+        >>> MLP(num_class=10)
     """
 
     def __init__(self, num_class=10):
-        super(LeNet5, self).__init__()
+        super(MLP, self).__init__()
         self.num_class = num_class
         self.fc1 = fc_with_initialize(784, 800)
         self.fc2 = fc_with_initialize(800, 800)
@@ -81,7 +81,7 @@ def generator_train():
         yield _input, _target
 
 
-net = LeNet5()
+net = MLP()
 optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)
 criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
 net_with_criterion = WithLossCell(net, criterion)
diff --git a/examples/basic_tutorials/tutorial_ms_cifar10_simple.py b/examples/basic_tutorials/tutorial_ms_cifar10_simple.py
deleted file mode 100644
index a31e99b64..000000000
--- a/examples/basic_tutorials/tutorial_ms_cifar10_simple.py
+++ /dev/null
@@ -1,163 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-import numpy as np
-import time
-import os
-# os.environ['TL_BACKEND'] = 'tensorflow'
-os.environ['TL_BACKEND'] = 'mindspore'
-
-import tensorflow as tf
-import tensorlayer as tl
-from tensorlayer.layers import
Module -from tensorlayer.layers import Dense, Dropout - -from mindspore.common import ParameterTuple -import mindspore as ms -import mindspore.dataset as ds -from mindspore.ops import composite -from mindspore.ops import operations as P -from mindspore.ops import functional as F -import mindspore.dataset.transforms.vision.c_transforms as C -import mindspore.dataset.transforms.c_transforms as C2 -from mindspore.nn import SoftmaxCrossEntropyWithLogits, Momentum, TrainOneStepCell, WithLossCell -from mindspore.parallel._utils import (_get_device_num, _get_mirror_mean, _get_parallel_mode) -from mindspore.train.parallel_utils import ParallelMode -from mindspore.nn.wrap import DistributedGradReducer -from mindspore import context -context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU") - - -class CustomModel(Module): - - def __init__(self): - super(CustomModel, self).__init__() - self.dropout1 = Dropout(keep=0.8) - self.dense1 = Dense(n_units=800, in_channels=784, act=tl.ReLU) - self.dropout2 = Dropout(keep=0.8) - self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800) - self.dropout3 = Dropout(keep=0.8) - self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800) - - def forward(self, x, foo=None): - z = self.dropout1(x) - z = self.dense1(z) - z = self.dropout2(z) - z = self.dense2(z) - z = self.dropout3(z) - out = self.dense3(z) - if foo is not None: - out = tl.ops.relu(out) - return out - - -class WithLoss(Module): - - def __init__(self, backbone, loss_fn): - super(WithLoss, self).__init__() - self._backbone = backbone - self._loss_fn = loss_fn - - def construct(self, data, label): - out = self._backbone(data) - return self._loss_fn(out, label) - - @property - def backbone_network(self): - return self._backbone - - -class GradWrap(Module): - """ GradWrap definition """ - - def __init__(self, network): - super(GradWrap, self).__init__(auto_prefix=False) - self.network = network - self.trainable = True - self.weights = ParameterTuple(network.trainable_weights) - - def construct(self, x, label): - weights = self.weights - return composite.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label) - - -class TrainOneStep(Module): - - def __init__(self, network, optimizer, sens=1.0): - super(TrainOneStep, self).__init__(auto_prefix=False) - self._built = True - self.trainable = True - self.network = network - self.network.set_grad() - self.network.add_flags(defer_inline=True) - self.weights = optimizer.parameters - self.optimizer = optimizer - self.grad = composite.GradOperation('grad', get_by_list=True, sens_param=True) - self.sens = sens - self.reducer_flag = False - self.grad_reducer = None - parallel_mode = _get_parallel_mode() - if parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL): - self.reducer_flag = True - if self.reducer_flag: - mean = _get_mirror_mean() - degree = _get_device_num() - self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree) - - def construct(self, data, label): - weights = self.weights - loss = self.network(data, label) - sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens) - grads = self.grad(self.network, weights)(data, label, sens) - if self.reducer_flag: - # apply grad reducer on grads - grads = self.grad_reducer(grads) - return F.depend(loss, self.optimizer(grads)) - - -def generator_train(): - inputs = X_train - targets = y_train - if len(inputs) != len(targets): - raise AssertionError("The length of inputs and targets should be equal") - for _input, _target in 
zip(inputs, targets): - yield _input, _target - - -MLP = CustomModel() -train_weights = MLP.trainable_weights - -X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) -train_ds = tf.data.Dataset.from_generator(generator_train, output_types=(tf.float32, tf.int32)) - -opt = Momentum(train_weights, 0.01, 0.9) -n_epoch = 50 -batch_size = 128 -print_freq = 2 -model = tl.models.Model(network=MLP, loss_fn=SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True), optimizer=opt) -model.train(n_epoch=n_epoch, train_dataset=train_ds, print_freq=print_freq, print_train_batch=False) - -# batch_size = 128 -# epoch = 50 -# -# # loss function definition -# ls = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) -# # optimization definition -# opt = Momentum(train_weights, 0.01, 0.9) -# net_with_criterion = WithLoss(MLP, ls) -# # train_network = TrainOneStep(net_with_criterion, opt) # optimizer -# train_network = GradWrap(net_with_criterion) -# acc = ms.nn.Accuracy() -# -# for epoch in range(epoch): -# MLP.set_train() -# for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True): -# X_batch = ms.Tensor(X_batch, dtype=ms.float32) -# y_batch = ms.Tensor(y_batch, dtype=ms.int32) -# output = MLP(X_batch) -# loss_output = ls(output, y_batch) -# grads = train_network(X_batch, y_batch) -# success = opt(grads) -# loss = loss_output.asnumpy() -# accutacy = acc() -# print(loss) diff --git a/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py b/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py new file mode 100644 index 000000000..27ae9be8c --- /dev/null +++ b/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import time +import numpy as np +import multiprocessing +import tensorflow as tf + +from tensorlayer.layers import Module, SequentialLayer +import tensorlayer as tl +from tensorlayer.layers import (Conv2d, Dense, Flatten, MaxPool2d, BatchNorm2d, Elementwise) + +X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) + +class Block(Module): + def __init__(self, in_channels): + super(Block, self).__init__() + self.dense1 = Dense(in_channels=in_channels, n_units=256) + self.dense2 = Dense(in_channels=256, n_units=384) + self.dense3 = Dense(in_channels=in_channels, n_units=384) + self.concat = Elementwise(combine_fn=tl.ops.add) + + def forward(self, inputs): + z = self.dense1(inputs) + z1 = self.dense2(z) + + z2 = self.dense3(inputs) + out = self.concat([z1, z2]) + return out + + +class CNN(Module): + + def __init__(self): + super(CNN, self).__init__() + # weights init + W_init = tl.initializers.truncated_normal(stddev=5e-2) + W_init2 = tl.initializers.truncated_normal(stddev=0.04) + b_init2 = tl.initializers.constant(value=0.1) + + self.conv1 = Conv2d(64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv1', in_channels=3) + self.bn = BatchNorm2d(num_features=64, act=tl.ReLU) + self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1') + + self.conv2 = Conv2d( + 64, (5, 5), (1, 1), padding='SAME', act=tl.ReLU, W_init=W_init, b_init=None, name='conv2', in_channels=64 + ) + self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2') + + self.flatten = Flatten(name='flatten') + self.dense1 = Dense(384, act=tl.ReLU, W_init=W_init2, b_init=b_init2, name='dense1relu', in_channels=2304) + self.dense_add = self.make_layer(in_channel=384) + + self.dense2 = 
Dense(192, act=tl.ReLU, W_init=W_init2, b_init=b_init2, name='dense2relu', in_channels=384) + self.dense3 = Dense(10, act=None, W_init=W_init2, name='output', in_channels=192) + + def forward(self, x): + z = self.conv1(x) + z = self.bn(z) + z = self.maxpool1(z) + z = self.conv2(z) + z = self.maxpool2(z) + z = self.flatten(z) + z = self.dense1(z) + z = self.dense_add(z) + + z = self.dense2(z) + z = self.dense3(z) + return z + + def make_layer(self, in_channel): + layers = [] + + _block = Block(in_channel) + layers.append(_block) + + for _ in range(1, 3): + range_block = Block(in_channel) + layers.append(range_block) + + return SequentialLayer(layers) + + +# get the network +net = CNN() +# training settings +batch_size = 128 +n_epoch = 500 +learning_rate = 0.0001 +print_freq = 5 +n_step_epoch = int(len(y_train) / batch_size) +n_step = n_epoch * n_step_epoch +shuffle_buffer_size = 128 + +train_weights = net.trainable_weights +optimizer = tl.optimizers.Adam(learning_rate) + + +def generator_train(): + inputs = X_train + targets = y_train + if len(inputs) != len(targets): + raise AssertionError("The length of inputs and targets should be equal") + for _input, _target in zip(inputs, targets): + # yield _input.encode('utf-8'), _target.encode('utf-8') + yield _input, _target + + +def generator_test(): + inputs = X_test + targets = y_test + if len(inputs) != len(targets): + raise AssertionError("The length of inputs and targets should be equal") + for _input, _target in zip(inputs, targets): + # yield _input.encode('utf-8'), _target.encode('utf-8') + yield _input, _target + + +def _map_fn_train(img, target): + # 1. Randomly crop a [height, width] section of the image. + img = tf.image.random_crop(img, [24, 24, 3]) + # 2. Randomly flip the image horizontally. + img = tf.image.random_flip_left_right(img) + # 3. Randomly change brightness. + img = tf.image.random_brightness(img, max_delta=63) + # 4. Randomly change contrast. + img = tf.image.random_contrast(img, lower=0.2, upper=1.8) + # 5. Subtract off the mean and divide by the variance of the pixels. + img = tf.image.per_image_standardization(img) + target = tf.reshape(target, ()) + return img, target + + +def _map_fn_test(img, target): + # 1. Crop the central [height, width] of the image. + img = tf.image.resize_with_pad(img, 24, 24) + # 2. Subtract off the mean and divide by the variance of the pixels. 
+    img = tf.image.per_image_standardization(img)
+    img = tf.reshape(img, (24, 24, 3))
+    target = tf.reshape(target, ())
+    return img, target
+
+
+# dataset API and augmentation
+train_ds = tf.data.Dataset.from_generator(
+    generator_train, output_types=(tf.float32, tf.int32)
+)  # , output_shapes=((24, 24, 3), (1)))
+train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
+# train_ds = train_ds.repeat(n_epoch)
+train_ds = train_ds.shuffle(shuffle_buffer_size)
+train_ds = train_ds.prefetch(buffer_size=4096)
+train_ds = train_ds.batch(batch_size)
+# value = train_ds.make_one_shot_iterator().get_next()
+
+test_ds = tf.data.Dataset.from_generator(
+    generator_test, output_types=(tf.float32, tf.int32)
+)  # , output_shapes=((24, 24, 3), (1)))
+# test_ds = test_ds.shuffle(shuffle_buffer_size)
+test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count())
+# test_ds = test_ds.repeat(n_epoch)
+test_ds = test_ds.prefetch(buffer_size=4096)
+test_ds = test_ds.batch(batch_size)
+# value_test = test_ds.make_one_shot_iterator().get_next()
+
+for epoch in range(n_epoch):
+    start_time = time.time()
+
+    train_loss, train_acc, n_iter = 0, 0, 0
+    net.set_train()  # switch to training mode once per epoch; net.eval() below switches back
+    for X_batch, y_batch in train_ds:
+        with tf.GradientTape() as tape:
+            # compute outputs
+            _logits = net(X_batch)
+            # compute loss
+            _loss_ce = tl.cost.cross_entropy(_logits, y_batch, name='train_loss')
+
+        # compute gradients of the loss w.r.t. the trainable weights and apply them
+        grad = tape.gradient(_loss_ce, train_weights)
+        optimizer.apply_gradients(zip(grad, train_weights))
+
+        train_loss += _loss_ce
+        train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
+        n_iter += 1
+
+    print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
+    print(" train loss: {}".format(train_loss / n_iter))
+    print(" train acc: {}".format(train_acc / n_iter))
+
+    # evaluate the model on the test set every print_freq epochs
+    if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
+
+        net.eval()
+        val_loss, val_acc, n_iter = 0, 0, 0
+        for X_batch, y_batch in test_ds:
+            _logits = net(X_batch)  # eval mode: BatchNorm uses its moving statistics
+            val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
+            val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
+            n_iter += 1
+        print(" val loss: {}".format(val_loss / n_iter))
+        print(" val acc: {}".format(val_acc / n_iter))
+
+# use testing data to evaluate the model
+net.eval()
+test_loss, test_acc, n_iter = 0, 0, 0
+for X_batch, y_batch in test_ds:
+    _logits = net(X_batch)
+    test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss')
+    test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
+    n_iter += 1
+print(" test loss: {}".format(test_loss / n_iter))
+print(" test acc: {}".format(test_acc / n_iter))
\ No newline at end of file
diff --git a/examples/model_zoo/__init__.py b/examples/model_zoo/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/model_zoo/common.py b/examples/model_zoo/common.py
new file mode 100644
index 000000000..7bc1bfd0b
--- /dev/null
+++ b/examples/model_zoo/common.py
@@ -0,0 +1,287 @@
+#!
/usr/bin/python +# -*- coding: utf-8 -*- + +import tensorflow as tf +import colorsys, random, cv2 +import numpy as np +from tensorlayer.visualize import save_image + +def decode_tf(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=0, XYSCALE=[1, 1, 1]): + batch_size = tf.shape(conv_output)[0] + conv_output = tf.reshape(conv_output, (batch_size, output_size, output_size, 3, 5 + NUM_CLASS)) + + conv_raw_dxdy, conv_raw_dwdh, conv_raw_conf, conv_raw_prob = tf.split(conv_output, (2, 2, 1, NUM_CLASS), axis=-1) + + xy_grid = tf.meshgrid(tf.range(output_size), tf.range(output_size)) + xy_grid = tf.expand_dims(tf.stack(xy_grid, axis=-1), axis=2) # [gx, gy, 1, 2] + xy_grid = tf.tile(tf.expand_dims(xy_grid, axis=0), [batch_size, 1, 1, 3, 1]) + + xy_grid = tf.cast(xy_grid, tf.float32) + + pred_xy = ((tf.sigmoid(conv_raw_dxdy) * XYSCALE[i]) - 0.5 * (XYSCALE[i] - 1) + xy_grid) * \ + STRIDES[i] + pred_wh = (tf.exp(conv_raw_dwdh) * ANCHORS[i]) + pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1) + + pred_conf = tf.sigmoid(conv_raw_conf) + pred_prob = tf.sigmoid(conv_raw_prob) + + pred_prob = pred_conf * pred_prob + pred_prob = tf.reshape(pred_prob, (batch_size, -1, NUM_CLASS)) + pred_xywh = tf.reshape(pred_xywh, (batch_size, -1, 4)) + + return pred_xywh, pred_prob + + +def decode(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE=[1, 1, 1]): + return decode_tf(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=i, XYSCALE=XYSCALE) + + +def filter_boxes(box_xywh, scores, score_threshold=0.4, input_shape=tf.constant([416, 416])): + scores_max = tf.math.reduce_max(scores, axis=-1) + + mask = scores_max >= score_threshold + class_boxes = tf.boolean_mask(box_xywh, mask) + pred_conf = tf.boolean_mask(scores, mask) + class_boxes = tf.reshape(class_boxes, [tf.shape(scores)[0], -1, tf.shape(class_boxes)[-1]]) + pred_conf = tf.reshape(pred_conf, [tf.shape(scores)[0], -1, tf.shape(pred_conf)[-1]]) + + box_xy, box_wh = tf.split(class_boxes, (2, 2), axis=-1) + + input_shape = tf.cast(input_shape, dtype=tf.float32) + box_yx = box_xy[..., ::-1] + box_hw = box_wh[..., ::-1] + + box_mins = (box_yx - (box_hw / 2.)) / input_shape + box_maxes = (box_yx + (box_hw / 2.)) / input_shape + boxes = tf.concat( + [ + box_mins[..., 0:1], # y_min + box_mins[..., 1:2], # x_min + box_maxes[..., 0:1], # y_max + box_maxes[..., 1:2] # x_max + ], + axis=-1 + ) + # return tf.concat([boxes, pred_conf], axis=-1) + return (boxes, pred_conf) + + +def read_class_names(class_file_name): + names = {} + with open(class_file_name, 'r') as data: + for ID, name in enumerate(data): + names[ID] = name.strip('\n') + return names + + +def draw_bbox(image, bboxes, show_label=True): + classes = read_class_names('model/coco.names') + num_classes = len(classes) + image_h, image_w, _ = image.shape + hsv_tuples = [(1.0 * x / num_classes, 1., 1.) 
for x in range(num_classes)]
+    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
+    colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
+
+    random.seed(0)
+    random.shuffle(colors)
+    random.seed(None)
+
+    out_boxes, out_scores, out_classes, num_boxes = bboxes
+    for i in range(num_boxes[0]):
+        # valid class ids run from 0 to num_classes - 1
+        if int(out_classes[0][i]) < 0 or int(out_classes[0][i]) >= num_classes: continue
+        coor = out_boxes[0][i]
+        coor[0] = int(coor[0] * image_h)
+        coor[2] = int(coor[2] * image_h)
+        coor[1] = int(coor[1] * image_w)
+        coor[3] = int(coor[3] * image_w)
+
+        fontScale = 0.5
+        score = out_scores[0][i]
+        class_ind = int(out_classes[0][i])
+        bbox_color = colors[class_ind]
+        bbox_thick = int(0.6 * (image_h + image_w) / 600)
+        c1, c2 = (coor[1], coor[0]), (coor[3], coor[2])
+        cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)
+
+        if show_label:
+            bbox_mess = '%s: %.2f' % (classes[class_ind], score)
+            t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0]
+            c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3)
+            cv2.rectangle(image, c1, (np.float32(c3[0]), np.float32(c3[1])), bbox_color, -1)  # filled
+
+            cv2.putText(
+                image, bbox_mess, (c1[0], np.float32(c1[1] - 2)), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (0, 0, 0),
+                bbox_thick // 2, lineType=cv2.LINE_AA
+            )
+    return image
+
+
+def get_anchors(anchors_path, tiny=False):
+    # despite its name, anchors_path is a flat list of anchor sizes, not a file path
+    anchors = np.array(anchors_path)
+    if tiny:
+        return anchors.reshape(2, 3, 2)
+    else:
+        return anchors.reshape(3, 3, 2)
+
+
+def decode_train(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=0, XYSCALE=[1, 1, 1]):
+    conv_output = tf.reshape(conv_output, (tf.shape(conv_output)[0], output_size, output_size, 3, 5 + NUM_CLASS))
+
+    conv_raw_dxdy, conv_raw_dwdh, conv_raw_conf, conv_raw_prob = tf.split(conv_output, (2, 2, 1, NUM_CLASS), axis=-1)
+
+    xy_grid = tf.meshgrid(tf.range(output_size), tf.range(output_size))
+    xy_grid = tf.expand_dims(tf.stack(xy_grid, axis=-1), axis=2)  # [gx, gy, 1, 2]
+    xy_grid = tf.tile(tf.expand_dims(xy_grid, axis=0), [tf.shape(conv_output)[0], 1, 1, 3, 1])
+
+    xy_grid = tf.cast(xy_grid, tf.float32)
+
+    pred_xy = ((tf.sigmoid(conv_raw_dxdy) * XYSCALE[i]) - 0.5 * (XYSCALE[i] - 1) + xy_grid) * \
+              STRIDES[i]
+    pred_wh = (tf.exp(conv_raw_dwdh) * ANCHORS[i])
+    pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1)
+
+    pred_conf = tf.sigmoid(conv_raw_conf)
+    pred_prob = tf.sigmoid(conv_raw_prob)
+
+    return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)
+
+
+def yolo4_input_processing(original_image):
+    image_data = cv2.resize(original_image, (416, 416))
+    image_data = image_data / 255.
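+    # stack the single normalized image into a batch of one; the decoder and
+    # NMS below expect batched NHWC input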
+    images_data = []
+    for i in range(1):
+        images_data.append(image_data)
+    images_data = np.asarray(images_data).astype(np.float32)
+    batch_data = tf.constant(images_data)
+    return batch_data
+
+
+def yolo4_output_processing(feature_maps):
+    STRIDES = [8, 16, 32]
+    ANCHORS = get_anchors([12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401])
+    NUM_CLASS = 80
+    XYSCALE = [1.2, 1.1, 1.05]
+    iou_threshold = 0.45
+    score_threshold = 0.25
+
+    bbox_tensors = []
+    prob_tensors = []
+    # looser pre-filter threshold; the final score_threshold is applied in NMS below
+    score_thres = 0.2
+    for i, fm in enumerate(feature_maps):
+        if i == 0:
+            output_tensors = decode(fm, 416 // 8, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
+        elif i == 1:
+            output_tensors = decode(fm, 416 // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
+        else:
+            output_tensors = decode(fm, 416 // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
+        bbox_tensors.append(output_tensors[0])
+        prob_tensors.append(output_tensors[1])
+    pred_bbox = tf.concat(bbox_tensors, axis=1)
+    pred_prob = tf.concat(prob_tensors, axis=1)
+    boxes, pred_conf = filter_boxes(
+        pred_bbox, pred_prob, score_threshold=score_thres, input_shape=tf.constant([416, 416])
+    )
+    pred = {'concat': tf.concat([boxes, pred_conf], axis=-1)}
+
+    # pred holds a single 'concat' entry; split it back into boxes and class scores
+    for key, value in pred.items():
+        boxes = value[:, :, 0:4]
+        pred_conf = value[:, :, 4:]
+
+    boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
+        boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
+        scores=tf.reshape(pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
+        max_output_size_per_class=50, max_total_size=50, iou_threshold=iou_threshold, score_threshold=score_threshold
+    )
+    output = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
+    return output
+
+
+def result_to_json(image, pred_bbox):
+    image_h, image_w, _ = image.shape
+    out_boxes, out_scores, out_classes, num_boxes = pred_bbox
+    class_names = {}
+    json_result = []
+    with open('model/coco.names', 'r') as data:
+        for ID, name in enumerate(data):
+            class_names[ID] = name.strip('\n')
+    nums_class = len(class_names)
+
+    for i in range(num_boxes[0]):
+        # valid class ids run from 0 to nums_class - 1
+        if int(out_classes[0][i]) < 0 or int(out_classes[0][i]) >= nums_class: continue
+        coor = out_boxes[0][i]
+        coor[0] = int(coor[0] * image_h)
+        coor[2] = int(coor[2] * image_h)
+        coor[1] = int(coor[1] * image_w)
+        coor[3] = int(coor[3] * image_w)
+
+        score = float(out_scores[0][i])
+        class_ind = int(out_classes[0][i])
+        bbox = np.array([coor[1], coor[0], coor[3], coor[2]]).tolist()  # [x1,y1,x2,y2]
+        json_result.append({'image': None, 'category_id': class_ind, 'bbox': bbox, 'score': score})
+
+    return json_result
+
+
+def draw_boxes_and_labels_to_image_with_json(image, json_result, class_list, save_name=None):
+    """Draw bboxes and class labels on image. Return the image with bboxes.
+
+    Parameters
+    -----------
+    image : numpy.array
+        The RGB image [height, width, channel].
+    json_result : list of dict
+        The object detection result in json format.
+    class_list : list of str
+        For converting ID to string on image.
+    save_name : None or str
+        The name of the image file (e.g. image.png); if None, the image is not saved.
+
+    Returns
+    -------
+    numpy.array
+        The image with bboxes and labels drawn.
+
+    References
+    -----------
+    - OpenCV rectangle and putText.
+    - `scikit-image`.
+
+    """
+    image_h, image_w, _ = image.shape
+    num_classes = len(class_list)
+    hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
+    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
+    colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
+    random.seed(0)
+    random.shuffle(colors)
+    random.seed(None)
+    bbox_thick = int(0.6 * (image_h + image_w) / 600)
+    fontScale = 0.5
+
+    for bbox_info in json_result:
+        image_name = bbox_info['image']
+        category_id = bbox_info['category_id']
+        # valid class ids run from 0 to num_classes - 1
+        if category_id < 0 or category_id >= num_classes: continue
+        bbox = bbox_info['bbox']  # the order of coordinates is [x1, y1, x2, y2]
+        score = bbox_info['score']
+
+        bbox_color = colors[category_id]
+        c1, c2 = (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3]))
+        cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)
+
+        bbox_mess = '%s: %.2f' % (class_list[category_id], score)
+        t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0]
+        c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3)
+        cv2.rectangle(image, c1, (np.float32(c3[0]), np.float32(c3[1])), bbox_color, -1)
+
+        cv2.putText(
+            image, bbox_mess, (c1[0], np.float32(c1[1] - 2)), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (0, 0, 0),
+            bbox_thick // 2, lineType=cv2.LINE_AA
+        )
+
+    if save_name is not None:
+        save_image(image, save_name)
+
+    return image
\ No newline at end of file
diff --git a/examples/model_zoo/imagenet_classes.py b/examples/model_zoo/imagenet_classes.py
new file mode 100644
index 000000000..d13cfda4a
--- /dev/null
+++ b/examples/model_zoo/imagenet_classes.py
@@ -0,0 +1,1003 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+class_names = '''tench, Tinca tinca
+goldfish, Carassius auratus
+great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias
+tiger shark, Galeocerdo cuvieri
+hammerhead, hammerhead shark
+electric ray, crampfish, numbfish, torpedo
+stingray
+cock
+hen
+ostrich, Struthio camelus
+brambling, Fringilla montifringilla
+goldfinch, Carduelis carduelis
+house finch, linnet, Carpodacus mexicanus
+junco, snowbird
+indigo bunting, indigo finch, indigo bird, Passerina cyanea
+robin, American robin, Turdus migratorius
+bulbul
+jay
+magpie
+chickadee
+water ouzel, dipper
+kite
+bald eagle, American eagle, Haliaeetus leucocephalus
+vulture
+great grey owl, great gray owl, Strix nebulosa
+European fire salamander, Salamandra salamandra
+common newt, Triturus vulgaris
+eft
+spotted salamander, Ambystoma maculatum
+axolotl, mud puppy, Ambystoma mexicanum
+bullfrog, Rana catesbeiana
+tree frog, tree-frog
+tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui
+loggerhead, loggerhead turtle, Caretta caretta
+leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea
+mud turtle
+terrapin
+box turtle, box tortoise
+banded gecko
+common iguana, iguana, Iguana iguana
+American chameleon, anole, Anolis carolinensis
+whiptail, whiptail lizard
+agama
+frilled lizard, Chlamydosaurus kingi
+alligator lizard
+Gila monster, Heloderma suspectum
+green lizard, Lacerta viridis
+African chameleon, Chamaeleo chamaeleon
+Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis
+African crocodile, Nile crocodile, Crocodylus niloticus
+American alligator, Alligator mississipiensis
+triceratops
+thunder snake, worm snake, Carphophis amoenus
+ringneck snake, ring-necked snake, ring snake
+hognose snake, puff adder, sand viper
+green snake, grass snake
+king snake, kingsnake
+garter snake, grass snake
+water snake
+vine snake
+night snake, Hypsiglena torquata
+boa constrictor, Constrictor constrictor
+rock python, rock snake, 
Python sebae +Indian cobra, Naja naja +green mamba +sea snake +horned viper, cerastes, sand viper, horned asp, Cerastes cornutus +diamondback, diamondback rattlesnake, Crotalus adamanteus +sidewinder, horned rattlesnake, Crotalus cerastes +trilobite +harvestman, daddy longlegs, Phalangium opilio +scorpion +black and gold garden spider, Argiope aurantia +barn spider, Araneus cavaticus +garden spider, Aranea diademata +black widow, Latrodectus mactans +tarantula +wolf spider, hunting spider +tick +centipede +black grouse +ptarmigan +ruffed grouse, partridge, Bonasa umbellus +prairie chicken, prairie grouse, prairie fowl +peacock +quail +partridge +African grey, African gray, Psittacus erithacus +macaw +sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita +lorikeet +coucal +bee eater +hornbill +hummingbird +jacamar +toucan +drake +red-breasted merganser, Mergus serrator +goose +black swan, Cygnus atratus +tusker +echidna, spiny anteater, anteater +platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus +wallaby, brush kangaroo +koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus +wombat +jellyfish +sea anemone, anemone +brain coral +flatworm, platyhelminth +nematode, nematode worm, roundworm +conch +snail +slug +sea slug, nudibranch +chiton, coat-of-mail shell, sea cradle, polyplacophore +chambered nautilus, pearly nautilus, nautilus +Dungeness crab, Cancer magister +rock crab, Cancer irroratus +fiddler crab +king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica +American lobster, Northern lobster, Maine lobster, Homarus americanus +spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish +crayfish, crawfish, crawdad, crawdaddy +hermit crab +isopod +white stork, Ciconia ciconia +black stork, Ciconia nigra +spoonbill +flamingo +little blue heron, Egretta caerulea +American egret, great white heron, Egretta albus +bittern +crane +limpkin, Aramus pictus +European gallinule, Porphyrio porphyrio +American coot, marsh hen, mud hen, water hen, Fulica americana +bustard +ruddy turnstone, Arenaria interpres +red-backed sandpiper, dunlin, Erolia alpina +redshank, Tringa totanus +dowitcher +oystercatcher, oyster catcher +pelican +king penguin, Aptenodytes patagonica +albatross, mollymawk +grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus +killer whale, killer, orca, grampus, sea wolf, Orcinus orca +dugong, Dugong dugon +sea lion +Chihuahua +Japanese spaniel +Maltese dog, Maltese terrier, Maltese +Pekinese, Pekingese, Peke +Shih-Tzu +Blenheim spaniel +papillon +toy terrier +Rhodesian ridgeback +Afghan hound, Afghan +basset, basset hound +beagle +bloodhound, sleuthhound +bluetick +black-and-tan coonhound +Walker hound, Walker foxhound +English foxhound +redbone +borzoi, Russian wolfhound +Irish wolfhound +Italian greyhound +whippet +Ibizan hound, Ibizan Podenco +Norwegian elkhound, elkhound +otterhound, otter hound +Saluki, gazelle hound +Scottish deerhound, deerhound +Weimaraner +Staffordshire bullterrier, Staffordshire bull terrier +American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier +Bedlington terrier +Border terrier +Kerry blue terrier +Irish terrier +Norfolk terrier +Norwich terrier +Yorkshire terrier +wire-haired fox terrier +Lakeland terrier +Sealyham terrier, Sealyham +Airedale, Airedale terrier +cairn, cairn terrier +Australian terrier +Dandie Dinmont, Dandie Dinmont terrier +Boston bull, Boston terrier +miniature 
schnauzer +giant schnauzer +standard schnauzer +Scotch terrier, Scottish terrier, Scottie +Tibetan terrier, chrysanthemum dog +silky terrier, Sydney silky +soft-coated wheaten terrier +West Highland white terrier +Lhasa, Lhasa apso +flat-coated retriever +curly-coated retriever +golden retriever +Labrador retriever +Chesapeake Bay retriever +German short-haired pointer +vizsla, Hungarian pointer +English setter +Irish setter, red setter +Gordon setter +Brittany spaniel +clumber, clumber spaniel +English springer, English springer spaniel +Welsh springer spaniel +cocker spaniel, English cocker spaniel, cocker +Sussex spaniel +Irish water spaniel +kuvasz +schipperke +groenendael +malinois +briard +kelpie +komondor +Old English sheepdog, bobtail +Shetland sheepdog, Shetland sheep dog, Shetland +collie +Border collie +Bouvier des Flandres, Bouviers des Flandres +Rottweiler +German shepherd, German shepherd dog, German police dog, alsatian +Doberman, Doberman pinscher +miniature pinscher +Greater Swiss Mountain dog +Bernese mountain dog +Appenzeller +EntleBucher +boxer +bull mastiff +Tibetan mastiff +French bulldog +Great Dane +Saint Bernard, St Bernard +Eskimo dog, husky +malamute, malemute, Alaskan malamute +Siberian husky +dalmatian, coach dog, carriage dog +affenpinscher, monkey pinscher, monkey dog +basenji +pug, pug-dog +Leonberg +Newfoundland, Newfoundland dog +Great Pyrenees +Samoyed, Samoyede +Pomeranian +chow, chow chow +keeshond +Brabancon griffon +Pembroke, Pembroke Welsh corgi +Cardigan, Cardigan Welsh corgi +toy poodle +miniature poodle +standard poodle +Mexican hairless +timber wolf, grey wolf, gray wolf, Canis lupus +white wolf, Arctic wolf, Canis lupus tundrarum +red wolf, maned wolf, Canis rufus, Canis niger +coyote, prairie wolf, brush wolf, Canis latrans +dingo, warrigal, warragal, Canis dingo +dhole, Cuon alpinus +African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus +hyena, hyaena +red fox, Vulpes vulpes +kit fox, Vulpes macrotis +Arctic fox, white fox, Alopex lagopus +grey fox, gray fox, Urocyon cinereoargenteus +tabby, tabby cat +tiger cat +Persian cat +Siamese cat, Siamese +Egyptian cat +cougar, puma, catamount, mountain lion, painter, panther, Felis concolor +lynx, catamount +leopard, Panthera pardus +snow leopard, ounce, Panthera uncia +jaguar, panther, Panthera onca, Felis onca +lion, king of beasts, Panthera leo +tiger, Panthera tigris +cheetah, chetah, Acinonyx jubatus +brown bear, bruin, Ursus arctos +American black bear, black bear, Ursus americanus, Euarctos americanus +ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus +sloth bear, Melursus ursinus, Ursus ursinus +mongoose +meerkat, mierkat +tiger beetle +ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle +ground beetle, carabid beetle +long-horned beetle, longicorn, longicorn beetle +leaf beetle, chrysomelid +dung beetle +rhinoceros beetle +weevil +fly +bee +ant, emmet, pismire +grasshopper, hopper +cricket +walking stick, walkingstick, stick insect +cockroach, roach +mantis, mantid +cicada, cicala +leafhopper +lacewing, lacewing fly +dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk +damselfly +admiral +ringlet, ringlet butterfly +monarch, monarch butterfly, milkweed butterfly, Danaus plexippus +cabbage butterfly +sulphur butterfly, sulfur butterfly +lycaenid, lycaenid butterfly +starfish, sea star +sea urchin +sea cucumber, holothurian +wood rabbit, cottontail, cottontail rabbit +hare +Angora, Angora rabbit 
+hamster +porcupine, hedgehog +fox squirrel, eastern fox squirrel, Sciurus niger +marmot +beaver +guinea pig, Cavia cobaya +sorrel +zebra +hog, pig, grunter, squealer, Sus scrofa +wild boar, boar, Sus scrofa +warthog +hippopotamus, hippo, river horse, Hippopotamus amphibius +ox +water buffalo, water ox, Asiatic buffalo, Bubalus bubalis +bison +ram, tup +bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis +ibex, Capra ibex +hartebeest +impala, Aepyceros melampus +gazelle +Arabian camel, dromedary, Camelus dromedarius +llama +weasel +mink +polecat, fitch, foulmart, foumart, Mustela putorius +black-footed ferret, ferret, Mustela nigripes +otter +skunk, polecat, wood pussy +badger +armadillo +three-toed sloth, ai, Bradypus tridactylus +orangutan, orang, orangutang, Pongo pygmaeus +gorilla, Gorilla gorilla +chimpanzee, chimp, Pan troglodytes +gibbon, Hylobates lar +siamang, Hylobates syndactylus, Symphalangus syndactylus +guenon, guenon monkey +patas, hussar monkey, Erythrocebus patas +baboon +macaque +langur +colobus, colobus monkey +proboscis monkey, Nasalis larvatus +marmoset +capuchin, ringtail, Cebus capucinus +howler monkey, howler +titi, titi monkey +spider monkey, Ateles geoffroyi +squirrel monkey, Saimiri sciureus +Madagascar cat, ring-tailed lemur, Lemur catta +indri, indris, Indri indri, Indri brevicaudatus +Indian elephant, Elephas maximus +African elephant, Loxodonta africana +lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens +giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca +barracouta, snoek +eel +coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch +rock beauty, Holocanthus tricolor +anemone fish +sturgeon +gar, garfish, garpike, billfish, Lepisosteus osseus +lionfish +puffer, pufferfish, blowfish, globefish +abacus +abaya +academic gown, academic robe, judge's robe +accordion, piano accordion, squeeze box +acoustic guitar +aircraft carrier, carrier, flattop, attack aircraft carrier +airliner +airship, dirigible +altar +ambulance +amphibian, amphibious vehicle +analog clock +apiary, bee house +apron +ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin +assault rifle, assault gun +backpack, back pack, knapsack, packsack, rucksack, haversack +bakery, bakeshop, bakehouse +balance beam, beam +balloon +ballpoint, ballpoint pen, ballpen, Biro +Band Aid +banjo +bannister, banister, balustrade, balusters, handrail +barbell +barber chair +barbershop +barn +barometer +barrel, cask +barrow, garden cart, lawn cart, wheelbarrow +baseball +basketball +bassinet +bassoon +bathing cap, swimming cap +bath towel +bathtub, bathing tub, bath, tub +beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon +beacon, lighthouse, beacon light, pharos +beaker +bearskin, busby, shako +beer bottle +beer glass +bell cote, bell cot +bib +bicycle-built-for-two, tandem bicycle, tandem +bikini, two-piece +binder, ring-binder +binoculars, field glasses, opera glasses +birdhouse +boathouse +bobsled, bobsleigh, bob +bolo tie, bolo, bola tie, bola +bonnet, poke bonnet +bookcase +bookshop, bookstore, bookstall +bottlecap +bow +bow tie, bow-tie, bowtie +brass, memorial tablet, plaque +brassiere, bra, bandeau +breakwater, groin, groyne, mole, bulwark, seawall, jetty +breastplate, aegis, egis +broom +bucket, pail +buckle +bulletproof vest +bullet train, bullet +butcher shop, meat market +cab, hack, taxi, taxicab +caldron, cauldron +candle, taper, 
wax light +cannon +canoe +can opener, tin opener +cardigan +car mirror +carousel, carrousel, merry-go-round, roundabout, whirligig +carpenter's kit, tool kit +carton +car wheel +cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM +cassette +cassette player +castle +catamaran +CD player +cello, violoncello +cellular telephone, cellular phone, cellphone, cell, mobile phone +chain +chainlink fence +chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour +chain saw, chainsaw +chest +chiffonier, commode +chime, bell, gong +china cabinet, china closet +Christmas stocking +church, church building +cinema, movie theater, movie theatre, movie house, picture palace +cleaver, meat cleaver, chopper +cliff dwelling +cloak +clog, geta, patten, sabot +cocktail shaker +coffee mug +coffeepot +coil, spiral, volute, whorl, helix +combination lock +computer keyboard, keypad +confectionery, confectionary, candy store +container ship, containership, container vessel +convertible +corkscrew, bottle screw +cornet, horn, trumpet, trump +cowboy boot +cowboy hat, ten-gallon hat +cradle +crane +crash helmet +crate +crib, cot +Crock Pot +croquet ball +crutch +cuirass +dam, dike, dyke +desk +desktop computer +dial telephone, dial phone +diaper, nappy, napkin +digital clock +digital watch +dining table, board +dishrag, dishcloth +dishwasher, dish washer, dishwashing machine +disk brake, disc brake +dock, dockage, docking facility +dogsled, dog sled, dog sleigh +dome +doormat, welcome mat +drilling platform, offshore rig +drum, membranophone, tympan +drumstick +dumbbell +Dutch oven +electric fan, blower +electric guitar +electric locomotive +entertainment center +envelope +espresso maker +face powder +feather boa, boa +file, file cabinet, filing cabinet +fireboat +fire engine, fire truck +fire screen, fireguard +flagpole, flagstaff +flute, transverse flute +folding chair +football helmet +forklift +fountain +fountain pen +four-poster +freight car +French horn, horn +frying pan, frypan, skillet +fur coat +garbage truck, dustcart +gasmask, respirator, gas helmet +gas pump, gasoline pump, petrol pump, island dispenser +goblet +go-kart +golf ball +golfcart, golf cart +gondola +gong, tam-tam +gown +grand piano, grand +greenhouse, nursery, glasshouse +grille, radiator grille +grocery store, grocery, food market, market +guillotine +hair slide +hair spray +half track +hammer +hamper +hand blower, blow dryer, blow drier, hair dryer, hair drier +hand-held computer, hand-held microcomputer +handkerchief, hankie, hanky, hankey +hard disc, hard disk, fixed disk +harmonica, mouth organ, harp, mouth harp +harp +harvester, reaper +hatchet +holster +home theater, home theatre +honeycomb +hook, claw +hoopskirt, crinoline +horizontal bar, high bar +horse cart, horse-cart +hourglass +iPod +iron, smoothing iron +jack-o'-lantern +jean, blue jean, denim +jeep, landrover +jersey, T-shirt, tee shirt +jigsaw puzzle +jinrikisha, ricksha, rickshaw +joystick +kimono +knee pad +knot +lab coat, laboratory coat +ladle +lampshade, lamp shade +laptop, laptop computer +lawn mower, mower +lens cap, lens cover +letter opener, paper knife, paperknife +library +lifeboat +lighter, light, igniter, ignitor +limousine, limo +liner, ocean liner +lipstick, lip rouge +Loafer +lotion +loudspeaker, speaker, speaker unit, loudspeaker system, speaker system +loupe, jeweler's loupe +lumbermill, sawmill +magnetic compass +mailbag, postbag +mailbox, letter box +maillot +maillot, 
tank suit +manhole cover +maraca +marimba, xylophone +mask +matchstick +maypole +maze, labyrinth +measuring cup +medicine chest, medicine cabinet +megalith, megalithic structure +microphone, mike +microwave, microwave oven +military uniform +milk can +minibus +miniskirt, mini +minivan +missile +mitten +mixing bowl +mobile home, manufactured home +Model T +modem +monastery +monitor +moped +mortar +mortarboard +mosque +mosquito net +motor scooter, scooter +mountain bike, all-terrain bike, off-roader +mountain tent +mouse, computer mouse +mousetrap +moving van +muzzle +nail +neck brace +necklace +nipple +notebook, notebook computer +obelisk +oboe, hautboy, hautbois +ocarina, sweet potato +odometer, hodometer, mileometer, milometer +oil filter +organ, pipe organ +oscilloscope, scope, cathode-ray oscilloscope, CRO +overskirt +oxcart +oxygen mask +packet +paddle, boat paddle +paddlewheel, paddle wheel +padlock +paintbrush +pajama, pyjama, pj's, jammies +palace +panpipe, pandean pipe, syrinx +paper towel +parachute, chute +parallel bars, bars +park bench +parking meter +passenger car, coach, carriage +patio, terrace +pay-phone, pay-station +pedestal, plinth, footstall +pencil box, pencil case +pencil sharpener +perfume, essence +Petri dish +photocopier +pick, plectrum, plectron +pickelhaube +picket fence, paling +pickup, pickup truck +pier +piggy bank, penny bank +pill bottle +pillow +ping-pong ball +pinwheel +pirate, pirate ship +pitcher, ewer +plane, carpenter's plane, woodworking plane +planetarium +plastic bag +plate rack +plow, plough +plunger, plumber's helper +Polaroid camera, Polaroid Land camera +pole +police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria +poncho +pool table, billiard table, snooker table +pop bottle, soda bottle +pot, flowerpot +potter's wheel +power drill +prayer rug, prayer mat +printer +prison, prison house +projectile, missile +projector +puck, hockey puck +punching bag, punch bag, punching ball, punchball +purse +quill, quill pen +quilt, comforter, comfort, puff +racer, race car, racing car +racket, racquet +radiator +radio, wireless +radio telescope, radio reflector +rain barrel +recreational vehicle, RV, R.V. 
+reel +reflex camera +refrigerator, icebox +remote control, remote +restaurant, eating house, eating place, eatery +revolver, six-gun, six-shooter +rifle +rocking chair, rocker +rotisserie +rubber eraser, rubber, pencil eraser +rugby ball +rule, ruler +running shoe +safe +safety pin +saltshaker, salt shaker +sandal +sarong +sax, saxophone +scabbard +scale, weighing machine +school bus +schooner +scoreboard +screen, CRT screen +screw +screwdriver +seat belt, seatbelt +sewing machine +shield, buckler +shoe shop, shoe-shop, shoe store +shoji +shopping basket +shopping cart +shovel +shower cap +shower curtain +ski +ski mask +sleeping bag +slide rule, slipstick +sliding door +slot, one-armed bandit +snorkel +snowmobile +snowplow, snowplough +soap dispenser +soccer ball +sock +solar dish, solar collector, solar furnace +sombrero +soup bowl +space bar +space heater +space shuttle +spatula +speedboat +spider web, spider's web +spindle +sports car, sport car +spotlight, spot +stage +steam locomotive +steel arch bridge +steel drum +stethoscope +stole +stone wall +stopwatch, stop watch +stove +strainer +streetcar, tram, tramcar, trolley, trolley car +stretcher +studio couch, day bed +stupa, tope +submarine, pigboat, sub, U-boat +suit, suit of clothes +sundial +sunglass +sunglasses, dark glasses, shades +sunscreen, sunblock, sun blocker +suspension bridge +swab, swob, mop +sweatshirt +swimming trunks, bathing trunks +swing +switch, electric switch, electrical switch +syringe +table lamp +tank, army tank, armored combat vehicle, armoured combat vehicle +tape player +teapot +teddy, teddy bear +television, television system +tennis ball +thatch, thatched roof +theater curtain, theatre curtain +thimble +thresher, thrasher, threshing machine +throne +tile roof +toaster +tobacco shop, tobacconist shop, tobacconist +toilet seat +torch +totem pole +tow truck, tow car, wrecker +toyshop +tractor +trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi +tray +trench coat +tricycle, trike, velocipede +trimaran +tripod +triumphal arch +trolleybus, trolley coach, trackless trolley +trombone +tub, vat +turnstile +typewriter keyboard +umbrella +unicycle, monocycle +upright, upright piano +vacuum, vacuum cleaner +vase +vault +velvet +vending machine +vestment +viaduct +violin, fiddle +volleyball +waffle iron +wall clock +wallet, billfold, notecase, pocketbook +wardrobe, closet, press +warplane, military plane +washbasin, handbasin, washbowl, lavabo, wash-hand basin +washer, automatic washer, washing machine +water bottle +water jug +water tower +whiskey jug +whistle +wig +window screen +window shade +Windsor tie +wine bottle +wing +wok +wooden spoon +wool, woolen, woollen +worm fence, snake fence, snake-rail fence, Virginia fence +wreck +yawl +yurt +web site, website, internet site, site +comic book +crossword puzzle, crossword +street sign +traffic light, traffic signal, stoplight +book jacket, dust cover, dust jacket, dust wrapper +menu +plate +guacamole +consomme +hot pot, hotpot +trifle +ice cream, icecream +ice lolly, lolly, lollipop, popsicle +French loaf +bagel, beigel +pretzel +cheeseburger +hotdog, hot dog, red hot +mashed potato +head cabbage +broccoli +cauliflower +zucchini, courgette +spaghetti squash +acorn squash +butternut squash +cucumber, cuke +artichoke, globe artichoke +bell pepper +cardoon +mushroom +Granny Smith +strawberry +orange +lemon +fig +pineapple, ananas +banana +jackfruit, jak, jack +custard apple +pomegranate +hay +carbonara +chocolate sauce, chocolate syrup +dough 
+meat loaf, meatloaf +pizza, pizza pie +potpie +burrito +red wine +espresso +cup +eggnog +alp +bubble +cliff, drop, drop-off +coral reef +geyser +lakeside, lakeshore +promontory, headland, head, foreland +sandbar, sand bar +seashore, coast, seacoast, sea-coast +valley, vale +volcano +ballplayer, baseball player +groom, bridegroom +scuba diver +rapeseed +daisy +yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum +corn +acorn +hip, rose hip, rosehip +buckeye, horse chestnut, conker +coral fungus +agaric +gyromitra +stinkhorn, carrion fungus +earthstar +hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa +bolete +ear, spike, capitulum +toilet tissue, toilet paper, bathroom tissue'''.split("\n") diff --git a/examples/model_zoo/model/coco.names b/examples/model_zoo/model/coco.names new file mode 100644 index 000000000..ec82f0ffd --- /dev/null +++ b/examples/model_zoo/model/coco.names @@ -0,0 +1,80 @@ +person +bicycle +car +motorbike +aeroplane +bus +train +truck +boat +traffic light +fire hydrant +stop sign +parking meter +bench +bird +cat +dog +horse +sheep +cow +elephant +bear +zebra +giraffe +backpack +umbrella +handbag +tie +suitcase +frisbee +skis +snowboard +sports ball +kite +baseball bat +baseball glove +skateboard +surfboard +tennis racket +bottle +wine glass +cup +fork +knife +spoon +bowl +banana +apple +sandwich +orange +broccoli +carrot +hot dog +pizza +donut +cake +chair +sofa +potted plant +bed +dining table +toilet +tvmonitor +laptop +mouse +remote +keyboard +cell phone +microwave +oven +toaster +sink +refrigerator +book +clock +vase +scissors +teddy bear +hair drier +toothbrush diff --git a/examples/model_zoo/model/weights_2.txt b/examples/model_zoo/model/weights_2.txt new file mode 100644 index 000000000..42cc4997c --- /dev/null +++ b/examples/model_zoo/model/weights_2.txt @@ -0,0 +1,541 @@ +conv2d_1/filters:0 +batchnorm2d_1/beta:0 +batchnorm2d_1/gamma:0 +batchnorm2d_1/moving_mean:0 +batchnorm2d_1/moving_var:0 +conv2d_2/filters:0 +batchnorm2d_2/beta:0 +batchnorm2d_2/gamma:0 +batchnorm2d_2/moving_mean:0 +batchnorm2d_2/moving_var:0 +conv_rote_block_1/filters:0 +conv2d_3/filters:0 +batchnorm2d_3/beta:0 +batchnorm2d_3/gamma:0 +batchnorm2d_3/moving_mean:0 +batchnorm2d_3/moving_var:0 +batchnorm2d_4/beta:0 +batchnorm2d_4/gamma:0 +batchnorm2d_4/moving_mean:0 +batchnorm2d_4/moving_var:0 +conv2d_4/filters:0 +batchnorm2d_5/beta:0 +batchnorm2d_5/gamma:0 +batchnorm2d_5/moving_mean:0 +batchnorm2d_5/moving_var:0 +conv2d_5/filters:0 +batchnorm2d_6/beta:0 +batchnorm2d_6/gamma:0 +batchnorm2d_6/moving_mean:0 +batchnorm2d_6/moving_var:0 +conv2d_6/filters:0 +batchnorm2d_7/beta:0 +batchnorm2d_7/gamma:0 +batchnorm2d_7/moving_mean:0 +batchnorm2d_7/moving_var:0 +conv2d_7/filters:0 +batchnorm2d_8/beta:0 +batchnorm2d_8/gamma:0 +batchnorm2d_8/moving_mean:0 +batchnorm2d_8/moving_var:0 +conv2d_8/filters:0 +batchnorm2d_9/beta:0 +batchnorm2d_9/gamma:0 +batchnorm2d_9/moving_mean:0 +batchnorm2d_9/moving_var:0 +conv_rote_block_2/filters:0 +conv2d_9/filters:0 +batchnorm2d_10/beta:0 +batchnorm2d_10/gamma:0 +batchnorm2d_10/moving_mean:0 +batchnorm2d_10/moving_var:0 +batchnorm2d_11/beta:0 +batchnorm2d_11/gamma:0 +batchnorm2d_11/moving_mean:0 +batchnorm2d_11/moving_var:0 +conv2d_10/filters:0 +batchnorm2d_12/beta:0 +batchnorm2d_12/gamma:0 +batchnorm2d_12/moving_mean:0 +batchnorm2d_12/moving_var:0 +conv2d_11/filters:0 +batchnorm2d_13/beta:0 +batchnorm2d_13/gamma:0 +batchnorm2d_13/moving_mean:0 +batchnorm2d_13/moving_var:0 +conv2d_12/filters:0 
+batchnorm2d_14/beta:0 +batchnorm2d_14/gamma:0 +batchnorm2d_14/moving_mean:0 +batchnorm2d_14/moving_var:0 +conv2d_13/filters:0 +batchnorm2d_15/beta:0 +batchnorm2d_15/gamma:0 +batchnorm2d_15/moving_mean:0 +batchnorm2d_15/moving_var:0 +conv2d_14/filters:0 +batchnorm2d_16/beta:0 +batchnorm2d_16/gamma:0 +batchnorm2d_16/moving_mean:0 +batchnorm2d_16/moving_var:0 +conv2d_15/filters:0 +batchnorm2d_17/beta:0 +batchnorm2d_17/gamma:0 +batchnorm2d_17/moving_mean:0 +batchnorm2d_17/moving_var:0 +conv2d_16/filters:0 +batchnorm2d_18/beta:0 +batchnorm2d_18/gamma:0 +batchnorm2d_18/moving_mean:0 +batchnorm2d_18/moving_var:0 +conv_rote_block_3/filters:0 +conv2d_17/filters:0 +batchnorm2d_19/beta:0 +batchnorm2d_19/gamma:0 +batchnorm2d_19/moving_mean:0 +batchnorm2d_19/moving_var:0 +batchnorm2d_20/beta:0 +batchnorm2d_20/gamma:0 +batchnorm2d_20/moving_mean:0 +batchnorm2d_20/moving_var:0 +conv2d_18/filters:0 +batchnorm2d_21/beta:0 +batchnorm2d_21/gamma:0 +batchnorm2d_21/moving_mean:0 +batchnorm2d_21/moving_var:0 +conv2d_19/filters:0 +batchnorm2d_22/beta:0 +batchnorm2d_22/gamma:0 +batchnorm2d_22/moving_mean:0 +batchnorm2d_22/moving_var:0 +conv2d_20/filters:0 +batchnorm2d_23/beta:0 +batchnorm2d_23/gamma:0 +batchnorm2d_23/moving_mean:0 +batchnorm2d_23/moving_var:0 +conv2d_21/filters:0 +batchnorm2d_24/beta:0 +batchnorm2d_24/gamma:0 +batchnorm2d_24/moving_mean:0 +batchnorm2d_24/moving_var:0 +conv2d_22/filters:0 +batchnorm2d_25/beta:0 +batchnorm2d_25/gamma:0 +batchnorm2d_25/moving_mean:0 +batchnorm2d_25/moving_var:0 +conv2d_23/filters:0 +batchnorm2d_26/beta:0 +batchnorm2d_26/gamma:0 +batchnorm2d_26/moving_mean:0 +batchnorm2d_26/moving_var:0 +conv2d_24/filters:0 +batchnorm2d_27/beta:0 +batchnorm2d_27/gamma:0 +batchnorm2d_27/moving_mean:0 +batchnorm2d_27/moving_var:0 +conv2d_25/filters:0 +batchnorm2d_28/beta:0 +batchnorm2d_28/gamma:0 +batchnorm2d_28/moving_mean:0 +batchnorm2d_28/moving_var:0 +conv2d_26/filters:0 +batchnorm2d_29/beta:0 +batchnorm2d_29/gamma:0 +batchnorm2d_29/moving_mean:0 +batchnorm2d_29/moving_var:0 +conv2d_27/filters:0 +batchnorm2d_30/beta:0 +batchnorm2d_30/gamma:0 +batchnorm2d_30/moving_mean:0 +batchnorm2d_30/moving_var:0 +conv2d_28/filters:0 +batchnorm2d_31/beta:0 +batchnorm2d_31/gamma:0 +batchnorm2d_31/moving_mean:0 +batchnorm2d_31/moving_var:0 +conv2d_29/filters:0 +batchnorm2d_32/beta:0 +batchnorm2d_32/gamma:0 +batchnorm2d_32/moving_mean:0 +batchnorm2d_32/moving_var:0 +conv2d_30/filters:0 +batchnorm2d_33/beta:0 +batchnorm2d_33/gamma:0 +batchnorm2d_33/moving_mean:0 +batchnorm2d_33/moving_var:0 +conv2d_31/filters:0 +batchnorm2d_34/beta:0 +batchnorm2d_34/gamma:0 +batchnorm2d_34/moving_mean:0 +batchnorm2d_34/moving_var:0 +conv2d_32/filters:0 +batchnorm2d_35/beta:0 +batchnorm2d_35/gamma:0 +batchnorm2d_35/moving_mean:0 +batchnorm2d_35/moving_var:0 +conv2d_33/filters:0 +batchnorm2d_36/beta:0 +batchnorm2d_36/gamma:0 +batchnorm2d_36/moving_mean:0 +batchnorm2d_36/moving_var:0 +conv2d_34/filters:0 +batchnorm2d_37/beta:0 +batchnorm2d_37/gamma:0 +batchnorm2d_37/moving_mean:0 +batchnorm2d_37/moving_var:0 +conv2d_35/filters:0 +batchnorm2d_38/beta:0 +batchnorm2d_38/gamma:0 +batchnorm2d_38/moving_mean:0 +batchnorm2d_38/moving_var:0 +conv_yolo_2/filters:0 +batchnorm2d_87/beta:0 +batchnorm2d_87/gamma:0 +batchnorm2d_87/moving_mean:0 +batchnorm2d_87/moving_var:0 +conv2d_36/filters:0 +batchnorm2d_39/beta:0 +batchnorm2d_39/gamma:0 +batchnorm2d_39/moving_mean:0 +batchnorm2d_39/moving_var:0 +conv_rote_block_4/filters:0 +conv2d_37/filters:0 +batchnorm2d_40/beta:0 +batchnorm2d_40/gamma:0 +batchnorm2d_40/moving_mean:0 
+batchnorm2d_40/moving_var:0 +batchnorm2d_41/beta:0 +batchnorm2d_41/gamma:0 +batchnorm2d_41/moving_mean:0 +batchnorm2d_41/moving_var:0 +conv2d_38/filters:0 +batchnorm2d_42/beta:0 +batchnorm2d_42/gamma:0 +batchnorm2d_42/moving_mean:0 +batchnorm2d_42/moving_var:0 +conv2d_39/filters:0 +batchnorm2d_43/beta:0 +batchnorm2d_43/gamma:0 +batchnorm2d_43/moving_mean:0 +batchnorm2d_43/moving_var:0 +conv2d_40/filters:0 +batchnorm2d_44/beta:0 +batchnorm2d_44/gamma:0 +batchnorm2d_44/moving_mean:0 +batchnorm2d_44/moving_var:0 +conv2d_41/filters:0 +batchnorm2d_45/beta:0 +batchnorm2d_45/gamma:0 +batchnorm2d_45/moving_mean:0 +batchnorm2d_45/moving_var:0 +conv2d_42/filters:0 +batchnorm2d_46/beta:0 +batchnorm2d_46/gamma:0 +batchnorm2d_46/moving_mean:0 +batchnorm2d_46/moving_var:0 +conv2d_43/filters:0 +batchnorm2d_47/beta:0 +batchnorm2d_47/gamma:0 +batchnorm2d_47/moving_mean:0 +batchnorm2d_47/moving_var:0 +conv2d_44/filters:0 +batchnorm2d_48/beta:0 +batchnorm2d_48/gamma:0 +batchnorm2d_48/moving_mean:0 +batchnorm2d_48/moving_var:0 +conv2d_45/filters:0 +batchnorm2d_49/beta:0 +batchnorm2d_49/gamma:0 +batchnorm2d_49/moving_mean:0 +batchnorm2d_49/moving_var:0 +conv2d_46/filters:0 +batchnorm2d_50/beta:0 +batchnorm2d_50/gamma:0 +batchnorm2d_50/moving_mean:0 +batchnorm2d_50/moving_var:0 +conv2d_47/filters:0 +batchnorm2d_51/beta:0 +batchnorm2d_51/gamma:0 +batchnorm2d_51/moving_mean:0 +batchnorm2d_51/moving_var:0 +conv2d_48/filters:0 +batchnorm2d_52/beta:0 +batchnorm2d_52/gamma:0 +batchnorm2d_52/moving_mean:0 +batchnorm2d_52/moving_var:0 +conv2d_49/filters:0 +batchnorm2d_53/beta:0 +batchnorm2d_53/gamma:0 +batchnorm2d_53/moving_mean:0 +batchnorm2d_53/moving_var:0 +conv2d_50/filters:0 +batchnorm2d_54/beta:0 +batchnorm2d_54/gamma:0 +batchnorm2d_54/moving_mean:0 +batchnorm2d_54/moving_var:0 +conv2d_51/filters:0 +batchnorm2d_55/beta:0 +batchnorm2d_55/gamma:0 +batchnorm2d_55/moving_mean:0 +batchnorm2d_55/moving_var:0 +conv2d_52/filters:0 +batchnorm2d_56/beta:0 +batchnorm2d_56/gamma:0 +batchnorm2d_56/moving_mean:0 +batchnorm2d_56/moving_var:0 +conv2d_53/filters:0 +batchnorm2d_57/beta:0 +batchnorm2d_57/gamma:0 +batchnorm2d_57/moving_mean:0 +batchnorm2d_57/moving_var:0 +conv2d_54/filters:0 +batchnorm2d_58/beta:0 +batchnorm2d_58/gamma:0 +batchnorm2d_58/moving_mean:0 +batchnorm2d_58/moving_var:0 +conv2d_55/filters:0 +batchnorm2d_59/beta:0 +batchnorm2d_59/gamma:0 +batchnorm2d_59/moving_mean:0 +batchnorm2d_59/moving_var:0 +conv_yolo_1/filters:0 +batchnorm2d_80/beta:0 +batchnorm2d_80/gamma:0 +batchnorm2d_80/moving_mean:0 +batchnorm2d_80/moving_var:0 +conv2d_56/filters:0 +batchnorm2d_60/beta:0 +batchnorm2d_60/gamma:0 +batchnorm2d_60/moving_mean:0 +batchnorm2d_60/moving_var:0 +conv_rote_block_5/filters:0 +conv2d_57/filters:0 +batchnorm2d_61/beta:0 +batchnorm2d_61/gamma:0 +batchnorm2d_61/moving_mean:0 +batchnorm2d_61/moving_var:0 +batchnorm2d_62/beta:0 +batchnorm2d_62/gamma:0 +batchnorm2d_62/moving_mean:0 +batchnorm2d_62/moving_var:0 +conv2d_58/filters:0 +batchnorm2d_63/beta:0 +batchnorm2d_63/gamma:0 +batchnorm2d_63/moving_mean:0 +batchnorm2d_63/moving_var:0 +conv2d_59/filters:0 +batchnorm2d_64/beta:0 +batchnorm2d_64/gamma:0 +batchnorm2d_64/moving_mean:0 +batchnorm2d_64/moving_var:0 +conv2d_60/filters:0 +batchnorm2d_65/beta:0 +batchnorm2d_65/gamma:0 +batchnorm2d_65/moving_mean:0 +batchnorm2d_65/moving_var:0 +conv2d_61/filters:0 +batchnorm2d_66/beta:0 +batchnorm2d_66/gamma:0 +batchnorm2d_66/moving_mean:0 +batchnorm2d_66/moving_var:0 +conv2d_62/filters:0 +batchnorm2d_67/beta:0 +batchnorm2d_67/gamma:0 +batchnorm2d_67/moving_mean:0 
+batchnorm2d_67/moving_var:0 +conv2d_63/filters:0 +batchnorm2d_68/beta:0 +batchnorm2d_68/gamma:0 +batchnorm2d_68/moving_mean:0 +batchnorm2d_68/moving_var:0 +conv2d_64/filters:0 +batchnorm2d_69/beta:0 +batchnorm2d_69/gamma:0 +batchnorm2d_69/moving_mean:0 +batchnorm2d_69/moving_var:0 +conv2d_65/filters:0 +batchnorm2d_70/beta:0 +batchnorm2d_70/gamma:0 +batchnorm2d_70/moving_mean:0 +batchnorm2d_70/moving_var:0 +conv2d_66/filters:0 +batchnorm2d_71/beta:0 +batchnorm2d_71/gamma:0 +batchnorm2d_71/moving_mean:0 +batchnorm2d_71/moving_var:0 +conv2d_67/filters:0 +batchnorm2d_72/beta:0 +batchnorm2d_72/gamma:0 +batchnorm2d_72/moving_mean:0 +batchnorm2d_72/moving_var:0 +conv2d_68/filters:0 +batchnorm2d_73/beta:0 +batchnorm2d_73/gamma:0 +batchnorm2d_73/moving_mean:0 +batchnorm2d_73/moving_var:0 +conv2d_69/filters:0 +batchnorm2d_74/beta:0 +batchnorm2d_74/gamma:0 +batchnorm2d_74/moving_mean:0 +batchnorm2d_74/moving_var:0 +conv2d_70/filters:0 +batchnorm2d_75/beta:0 +batchnorm2d_75/gamma:0 +batchnorm2d_75/moving_mean:0 +batchnorm2d_75/moving_var:0 +conv2d_71/filters:0 +batchnorm2d_76/beta:0 +batchnorm2d_76/gamma:0 +batchnorm2d_76/moving_mean:0 +batchnorm2d_76/moving_var:0 +conv2d_72/filters:0 +batchnorm2d_77/beta:0 +batchnorm2d_77/gamma:0 +batchnorm2d_77/moving_mean:0 +batchnorm2d_77/moving_var:0 +conv2d_73/filters:0 +batchnorm2d_78/beta:0 +batchnorm2d_78/gamma:0 +batchnorm2d_78/moving_mean:0 +batchnorm2d_78/moving_var:0 +conv2d_74/filters:0 +batchnorm2d_79/beta:0 +batchnorm2d_79/gamma:0 +batchnorm2d_79/moving_mean:0 +batchnorm2d_79/moving_var:0 +conv2d_75/filters:0 +batchnorm2d_81/beta:0 +batchnorm2d_81/gamma:0 +batchnorm2d_81/moving_mean:0 +batchnorm2d_81/moving_var:0 +conv2d_76/filters:0 +batchnorm2d_82/beta:0 +batchnorm2d_82/gamma:0 +batchnorm2d_82/moving_mean:0 +batchnorm2d_82/moving_var:0 +conv2d_77/filters:0 +batchnorm2d_83/beta:0 +batchnorm2d_83/gamma:0 +batchnorm2d_83/moving_mean:0 +batchnorm2d_83/moving_var:0 +conv2d_78/filters:0 +batchnorm2d_84/beta:0 +batchnorm2d_84/gamma:0 +batchnorm2d_84/moving_mean:0 +batchnorm2d_84/moving_var:0 +conv2d_79/filters:0 +batchnorm2d_85/beta:0 +batchnorm2d_85/gamma:0 +batchnorm2d_85/moving_mean:0 +batchnorm2d_85/moving_var:0 +conv2d_80/filters:0 +batchnorm2d_86/beta:0 +batchnorm2d_86/gamma:0 +batchnorm2d_86/moving_mean:0 +batchnorm2d_86/moving_var:0 +conv2d_81/filters:0 +batchnorm2d_88/beta:0 +batchnorm2d_88/gamma:0 +batchnorm2d_88/moving_mean:0 +batchnorm2d_88/moving_var:0 +conv2d_82/filters:0 +batchnorm2d_89/beta:0 +batchnorm2d_89/gamma:0 +batchnorm2d_89/moving_mean:0 +batchnorm2d_89/moving_var:0 +conv2d_83/filters:0 +batchnorm2d_90/beta:0 +batchnorm2d_90/gamma:0 +batchnorm2d_90/moving_mean:0 +batchnorm2d_90/moving_var:0 +conv2d_84/filters:0 +batchnorm2d_91/beta:0 +batchnorm2d_91/gamma:0 +batchnorm2d_91/moving_mean:0 +batchnorm2d_91/moving_var:0 +conv2d_85/filters:0 +batchnorm2d_92/beta:0 +batchnorm2d_92/gamma:0 +batchnorm2d_92/moving_mean:0 +batchnorm2d_92/moving_var:0 +conv_route_1/filters:0 +batchnorm2d_93/beta:0 +batchnorm2d_93/gamma:0 +batchnorm2d_93/moving_mean:0 +batchnorm2d_93/moving_var:0 +conv_route_2/filters:0 +conv2d_86/filters:0 +conv2d_86/biases:0 +batchnorm2d_94/beta:0 +batchnorm2d_94/gamma:0 +batchnorm2d_94/moving_mean:0 +batchnorm2d_94/moving_var:0 +conv2d_87/filters:0 +batchnorm2d_95/beta:0 +batchnorm2d_95/gamma:0 +batchnorm2d_95/moving_mean:0 +batchnorm2d_95/moving_var:0 +conv2d_88/filters:0 +batchnorm2d_96/beta:0 +batchnorm2d_96/gamma:0 +batchnorm2d_96/moving_mean:0 +batchnorm2d_96/moving_var:0 +conv2d_89/filters:0 +batchnorm2d_97/beta:0 
+batchnorm2d_97/gamma:0 +batchnorm2d_97/moving_mean:0 +batchnorm2d_97/moving_var:0 +conv2d_90/filters:0 +batchnorm2d_98/beta:0 +batchnorm2d_98/gamma:0 +batchnorm2d_98/moving_mean:0 +batchnorm2d_98/moving_var:0 +conv2d_91/filters:0 +batchnorm2d_99/beta:0 +batchnorm2d_99/gamma:0 +batchnorm2d_99/moving_mean:0 +batchnorm2d_99/moving_var:0 +conv_route_3/filters:0 +batchnorm2d_100/beta:0 +batchnorm2d_100/gamma:0 +batchnorm2d_100/moving_mean:0 +batchnorm2d_100/moving_var:0 +conv_route_4/filters:0 +conv2d_92/filters:0 +conv2d_92/biases:0 +batchnorm2d_101/beta:0 +batchnorm2d_101/gamma:0 +batchnorm2d_101/moving_mean:0 +batchnorm2d_101/moving_var:0 +conv2d_93/filters:0 +batchnorm2d_102/beta:0 +batchnorm2d_102/gamma:0 +batchnorm2d_102/moving_mean:0 +batchnorm2d_102/moving_var:0 +conv2d_94/filters:0 +batchnorm2d_103/beta:0 +batchnorm2d_103/gamma:0 +batchnorm2d_103/moving_mean:0 +batchnorm2d_103/moving_var:0 +conv2d_95/filters:0 +batchnorm2d_104/beta:0 +batchnorm2d_104/gamma:0 +batchnorm2d_104/moving_mean:0 +batchnorm2d_104/moving_var:0 +conv2d_96/filters:0 +batchnorm2d_105/beta:0 +batchnorm2d_105/gamma:0 +batchnorm2d_105/moving_mean:0 +batchnorm2d_105/moving_var:0 +conv2d_97/filters:0 +batchnorm2d_106/beta:0 +batchnorm2d_106/gamma:0 +batchnorm2d_106/moving_mean:0 +batchnorm2d_106/moving_var:0 +conv2d_98/filters:0 +batchnorm2d_107/beta:0 +batchnorm2d_107/gamma:0 +batchnorm2d_107/moving_mean:0 +batchnorm2d_107/moving_var:0 +conv2d_99/filters:0 +conv2d_99/biases:0 \ No newline at end of file diff --git a/examples/model_zoo/model/weights_3.txt b/examples/model_zoo/model/weights_3.txt new file mode 100644 index 000000000..b9ff6e190 --- /dev/null +++ b/examples/model_zoo/model/weights_3.txt @@ -0,0 +1,541 @@ +conv2d_1/filters:0 +batchnorm2d_1/beta:0 +batchnorm2d_1/gamma:0 +batchnorm2d_1/moving_mean:0 +batchnorm2d_1/moving_var:0 +conv2d_2/filters:0 +batchnorm2d_2/beta:0 +batchnorm2d_2/gamma:0 +batchnorm2d_2/moving_mean:0 +batchnorm2d_2/moving_var:0 +conv_rote_block_1/filters:0 +batchnorm2d_3/beta:0 +batchnorm2d_3/gamma:0 +batchnorm2d_3/moving_mean:0 +batchnorm2d_3/moving_var:0 +conv2d_3/filters:0 +batchnorm2d_4/beta:0 +batchnorm2d_4/gamma:0 +batchnorm2d_4/moving_mean:0 +batchnorm2d_4/moving_var:0 +conv2d_4/filters:0 +batchnorm2d_5/beta:0 +batchnorm2d_5/gamma:0 +batchnorm2d_5/moving_mean:0 +batchnorm2d_5/moving_var:0 +conv2d_5/filters:0 +batchnorm2d_6/beta:0 +batchnorm2d_6/gamma:0 +batchnorm2d_6/moving_mean:0 +batchnorm2d_6/moving_var:0 +conv2d_6/filters:0 +batchnorm2d_7/beta:0 +batchnorm2d_7/gamma:0 +batchnorm2d_7/moving_mean:0 +batchnorm2d_7/moving_var:0 +conv2d_7/filters:0 +batchnorm2d_8/beta:0 +batchnorm2d_8/gamma:0 +batchnorm2d_8/moving_mean:0 +batchnorm2d_8/moving_var:0 +conv2d_8/filters:0 +batchnorm2d_9/beta:0 +batchnorm2d_9/gamma:0 +batchnorm2d_9/moving_mean:0 +batchnorm2d_9/moving_var:0 +conv_rote_block_2/filters:0 +batchnorm2d_10/beta:0 +batchnorm2d_10/gamma:0 +batchnorm2d_10/moving_mean:0 +batchnorm2d_10/moving_var:0 +conv2d_9/filters:0 +batchnorm2d_11/beta:0 +batchnorm2d_11/gamma:0 +batchnorm2d_11/moving_mean:0 +batchnorm2d_11/moving_var:0 +conv2d_10/filters:0 +batchnorm2d_12/beta:0 +batchnorm2d_12/gamma:0 +batchnorm2d_12/moving_mean:0 +batchnorm2d_12/moving_var:0 +conv2d_11/filters:0 +batchnorm2d_13/beta:0 +batchnorm2d_13/gamma:0 +batchnorm2d_13/moving_mean:0 +batchnorm2d_13/moving_var:0 +conv2d_12/filters:0 +batchnorm2d_14/beta:0 +batchnorm2d_14/gamma:0 +batchnorm2d_14/moving_mean:0 +batchnorm2d_14/moving_var:0 +conv2d_13/filters:0 +batchnorm2d_15/beta:0 +batchnorm2d_15/gamma:0 
+batchnorm2d_15/moving_mean:0
+batchnorm2d_15/moving_var:0
+conv2d_14/filters:0
+batchnorm2d_16/beta:0
+batchnorm2d_16/gamma:0
+batchnorm2d_16/moving_mean:0
+batchnorm2d_16/moving_var:0
+conv2d_15/filters:0
+batchnorm2d_17/beta:0
+batchnorm2d_17/gamma:0
+batchnorm2d_17/moving_mean:0
+batchnorm2d_17/moving_var:0
+conv2d_16/filters:0
+batchnorm2d_18/beta:0
+batchnorm2d_18/gamma:0
+batchnorm2d_18/moving_mean:0
+batchnorm2d_18/moving_var:0
+conv_rote_block_3/filters:0
+batchnorm2d_19/beta:0
+batchnorm2d_19/gamma:0
+batchnorm2d_19/moving_mean:0
+batchnorm2d_19/moving_var:0
+conv2d_17/filters:0
+batchnorm2d_20/beta:0
+batchnorm2d_20/gamma:0
+batchnorm2d_20/moving_mean:0
+batchnorm2d_20/moving_var:0
+conv2d_18/filters:0
+batchnorm2d_21/beta:0
+batchnorm2d_21/gamma:0
+batchnorm2d_21/moving_mean:0
+batchnorm2d_21/moving_var:0
+conv2d_19/filters:0
+batchnorm2d_22/beta:0
+batchnorm2d_22/gamma:0
+batchnorm2d_22/moving_mean:0
+batchnorm2d_22/moving_var:0
+conv2d_20/filters:0
+batchnorm2d_23/beta:0
+batchnorm2d_23/gamma:0
+batchnorm2d_23/moving_mean:0
+batchnorm2d_23/moving_var:0
+conv2d_21/filters:0
+batchnorm2d_24/beta:0
+batchnorm2d_24/gamma:0
+batchnorm2d_24/moving_mean:0
+batchnorm2d_24/moving_var:0
+conv2d_22/filters:0
+batchnorm2d_25/beta:0
+batchnorm2d_25/gamma:0
+batchnorm2d_25/moving_mean:0
+batchnorm2d_25/moving_var:0
+conv2d_23/filters:0
+batchnorm2d_26/beta:0
+batchnorm2d_26/gamma:0
+batchnorm2d_26/moving_mean:0
+batchnorm2d_26/moving_var:0
+conv2d_24/filters:0
+batchnorm2d_27/beta:0
+batchnorm2d_27/gamma:0
+batchnorm2d_27/moving_mean:0
+batchnorm2d_27/moving_var:0
+conv2d_25/filters:0
+batchnorm2d_28/beta:0
+batchnorm2d_28/gamma:0
+batchnorm2d_28/moving_mean:0
+batchnorm2d_28/moving_var:0
+conv2d_26/filters:0
+batchnorm2d_29/beta:0
+batchnorm2d_29/gamma:0
+batchnorm2d_29/moving_mean:0
+batchnorm2d_29/moving_var:0
+conv2d_27/filters:0
+batchnorm2d_30/beta:0
+batchnorm2d_30/gamma:0
+batchnorm2d_30/moving_mean:0
+batchnorm2d_30/moving_var:0
+conv2d_28/filters:0
+batchnorm2d_31/beta:0
+batchnorm2d_31/gamma:0
+batchnorm2d_31/moving_mean:0
+batchnorm2d_31/moving_var:0
+conv2d_29/filters:0
+batchnorm2d_32/beta:0
+batchnorm2d_32/gamma:0
+batchnorm2d_32/moving_mean:0
+batchnorm2d_32/moving_var:0
+conv2d_30/filters:0
+batchnorm2d_33/beta:0
+batchnorm2d_33/gamma:0
+batchnorm2d_33/moving_mean:0
+batchnorm2d_33/moving_var:0
+conv2d_31/filters:0
+batchnorm2d_34/beta:0
+batchnorm2d_34/gamma:0
+batchnorm2d_34/moving_mean:0
+batchnorm2d_34/moving_var:0
+conv2d_32/filters:0
+batchnorm2d_35/beta:0
+batchnorm2d_35/gamma:0
+batchnorm2d_35/moving_mean:0
+batchnorm2d_35/moving_var:0
+conv2d_33/filters:0
+batchnorm2d_36/beta:0
+batchnorm2d_36/gamma:0
+batchnorm2d_36/moving_mean:0
+batchnorm2d_36/moving_var:0
+conv2d_34/filters:0
+batchnorm2d_37/beta:0
+batchnorm2d_37/gamma:0
+batchnorm2d_37/moving_mean:0
+batchnorm2d_37/moving_var:0
+conv2d_35/filters:0
+batchnorm2d_38/beta:0
+batchnorm2d_38/gamma:0
+batchnorm2d_38/moving_mean:0
+batchnorm2d_38/moving_var:0
+conv2d_36/filters:0
+batchnorm2d_39/beta:0
+batchnorm2d_39/gamma:0
+batchnorm2d_39/moving_mean:0
+batchnorm2d_39/moving_var:0
+conv_rote_block_4/filters:0
+batchnorm2d_40/beta:0
+batchnorm2d_40/gamma:0
+batchnorm2d_40/moving_mean:0
+batchnorm2d_40/moving_var:0
+conv2d_37/filters:0
+batchnorm2d_41/beta:0
+batchnorm2d_41/gamma:0
+batchnorm2d_41/moving_mean:0
+batchnorm2d_41/moving_var:0
+conv2d_38/filters:0
+batchnorm2d_42/beta:0
+batchnorm2d_42/gamma:0
+batchnorm2d_42/moving_mean:0
+batchnorm2d_42/moving_var:0
+conv2d_39/filters:0
+batchnorm2d_43/beta:0
+batchnorm2d_43/gamma:0
+batchnorm2d_43/moving_mean:0
+batchnorm2d_43/moving_var:0
+conv2d_40/filters:0
+batchnorm2d_44/beta:0
+batchnorm2d_44/gamma:0
+batchnorm2d_44/moving_mean:0
+batchnorm2d_44/moving_var:0
+conv2d_41/filters:0
+batchnorm2d_45/beta:0
+batchnorm2d_45/gamma:0
+batchnorm2d_45/moving_mean:0
+batchnorm2d_45/moving_var:0
+conv2d_42/filters:0
+batchnorm2d_46/beta:0
+batchnorm2d_46/gamma:0
+batchnorm2d_46/moving_mean:0
+batchnorm2d_46/moving_var:0
+conv2d_43/filters:0
+batchnorm2d_47/beta:0
+batchnorm2d_47/gamma:0
+batchnorm2d_47/moving_mean:0
+batchnorm2d_47/moving_var:0
+conv2d_44/filters:0
+batchnorm2d_48/beta:0
+batchnorm2d_48/gamma:0
+batchnorm2d_48/moving_mean:0
+batchnorm2d_48/moving_var:0
+conv2d_45/filters:0
+batchnorm2d_49/beta:0
+batchnorm2d_49/gamma:0
+batchnorm2d_49/moving_mean:0
+batchnorm2d_49/moving_var:0
+conv2d_46/filters:0
+batchnorm2d_50/beta:0
+batchnorm2d_50/gamma:0
+batchnorm2d_50/moving_mean:0
+batchnorm2d_50/moving_var:0
+conv2d_47/filters:0
+batchnorm2d_51/beta:0
+batchnorm2d_51/gamma:0
+batchnorm2d_51/moving_mean:0
+batchnorm2d_51/moving_var:0
+conv2d_48/filters:0
+batchnorm2d_52/beta:0
+batchnorm2d_52/gamma:0
+batchnorm2d_52/moving_mean:0
+batchnorm2d_52/moving_var:0
+conv2d_49/filters:0
+batchnorm2d_53/beta:0
+batchnorm2d_53/gamma:0
+batchnorm2d_53/moving_mean:0
+batchnorm2d_53/moving_var:0
+conv2d_50/filters:0
+batchnorm2d_54/beta:0
+batchnorm2d_54/gamma:0
+batchnorm2d_54/moving_mean:0
+batchnorm2d_54/moving_var:0
+conv2d_51/filters:0
+batchnorm2d_55/beta:0
+batchnorm2d_55/gamma:0
+batchnorm2d_55/moving_mean:0
+batchnorm2d_55/moving_var:0
+conv2d_52/filters:0
+batchnorm2d_56/beta:0
+batchnorm2d_56/gamma:0
+batchnorm2d_56/moving_mean:0
+batchnorm2d_56/moving_var:0
+conv2d_53/filters:0
+batchnorm2d_57/beta:0
+batchnorm2d_57/gamma:0
+batchnorm2d_57/moving_mean:0
+batchnorm2d_57/moving_var:0
+conv2d_54/filters:0
+batchnorm2d_58/beta:0
+batchnorm2d_58/gamma:0
+batchnorm2d_58/moving_mean:0
+batchnorm2d_58/moving_var:0
+conv2d_55/filters:0
+batchnorm2d_59/beta:0
+batchnorm2d_59/gamma:0
+batchnorm2d_59/moving_mean:0
+batchnorm2d_59/moving_var:0
+conv2d_56/filters:0
+batchnorm2d_60/beta:0
+batchnorm2d_60/gamma:0
+batchnorm2d_60/moving_mean:0
+batchnorm2d_60/moving_var:0
+conv_rote_block_5/filters:0
+batchnorm2d_61/beta:0
+batchnorm2d_61/gamma:0
+batchnorm2d_61/moving_mean:0
+batchnorm2d_61/moving_var:0
+conv2d_57/filters:0
+batchnorm2d_62/beta:0
+batchnorm2d_62/gamma:0
+batchnorm2d_62/moving_mean:0
+batchnorm2d_62/moving_var:0
+conv2d_58/filters:0
+batchnorm2d_63/beta:0
+batchnorm2d_63/gamma:0
+batchnorm2d_63/moving_mean:0
+batchnorm2d_63/moving_var:0
+conv2d_59/filters:0
+batchnorm2d_64/beta:0
+batchnorm2d_64/gamma:0
+batchnorm2d_64/moving_mean:0
+batchnorm2d_64/moving_var:0
+conv2d_60/filters:0
+batchnorm2d_65/beta:0
+batchnorm2d_65/gamma:0
+batchnorm2d_65/moving_mean:0
+batchnorm2d_65/moving_var:0
+conv2d_61/filters:0
+batchnorm2d_66/beta:0
+batchnorm2d_66/gamma:0
+batchnorm2d_66/moving_mean:0
+batchnorm2d_66/moving_var:0
+conv2d_62/filters:0
+batchnorm2d_67/beta:0
+batchnorm2d_67/gamma:0
+batchnorm2d_67/moving_mean:0
+batchnorm2d_67/moving_var:0
+conv2d_63/filters:0
+batchnorm2d_68/beta:0
+batchnorm2d_68/gamma:0
+batchnorm2d_68/moving_mean:0
+batchnorm2d_68/moving_var:0
+conv2d_64/filters:0
+batchnorm2d_69/beta:0
+batchnorm2d_69/gamma:0
+batchnorm2d_69/moving_mean:0
+batchnorm2d_69/moving_var:0
+conv2d_65/filters:0
+batchnorm2d_70/beta:0
+batchnorm2d_70/gamma:0
+batchnorm2d_70/moving_mean:0
+batchnorm2d_70/moving_var:0
+conv2d_66/filters:0
+batchnorm2d_71/beta:0
+batchnorm2d_71/gamma:0
+batchnorm2d_71/moving_mean:0
+batchnorm2d_71/moving_var:0
+conv2d_67/filters:0
+batchnorm2d_72/beta:0
+batchnorm2d_72/gamma:0
+batchnorm2d_72/moving_mean:0
+batchnorm2d_72/moving_var:0
+conv2d_68/filters:0
+batchnorm2d_73/beta:0
+batchnorm2d_73/gamma:0
+batchnorm2d_73/moving_mean:0
+batchnorm2d_73/moving_var:0
+conv2d_69/filters:0
+batchnorm2d_74/beta:0
+batchnorm2d_74/gamma:0
+batchnorm2d_74/moving_mean:0
+batchnorm2d_74/moving_var:0
+conv2d_70/filters:0
+batchnorm2d_75/beta:0
+batchnorm2d_75/gamma:0
+batchnorm2d_75/moving_mean:0
+batchnorm2d_75/moving_var:0
+conv2d_71/filters:0
+batchnorm2d_76/beta:0
+batchnorm2d_76/gamma:0
+batchnorm2d_76/moving_mean:0
+batchnorm2d_76/moving_var:0
+conv2d_72/filters:0
+batchnorm2d_77/beta:0
+batchnorm2d_77/gamma:0
+batchnorm2d_77/moving_mean:0
+batchnorm2d_77/moving_var:0
+conv2d_73/filters:0
+batchnorm2d_78/beta:0
+batchnorm2d_78/gamma:0
+batchnorm2d_78/moving_mean:0
+batchnorm2d_78/moving_var:0
+conv2d_74/filters:0
+batchnorm2d_79/beta:0
+batchnorm2d_79/gamma:0
+batchnorm2d_79/moving_mean:0
+batchnorm2d_79/moving_var:0
+conv_yolo_1/filters:0
+batchnorm2d_80/beta:0
+batchnorm2d_80/gamma:0
+batchnorm2d_80/moving_mean:0
+batchnorm2d_80/moving_var:0
+conv2d_75/filters:0
+batchnorm2d_81/beta:0
+batchnorm2d_81/gamma:0
+batchnorm2d_81/moving_mean:0
+batchnorm2d_81/moving_var:0
+conv2d_76/filters:0
+batchnorm2d_82/beta:0
+batchnorm2d_82/gamma:0
+batchnorm2d_82/moving_mean:0
+batchnorm2d_82/moving_var:0
+conv2d_77/filters:0
+batchnorm2d_83/beta:0
+batchnorm2d_83/gamma:0
+batchnorm2d_83/moving_mean:0
+batchnorm2d_83/moving_var:0
+conv2d_78/filters:0
+batchnorm2d_84/beta:0
+batchnorm2d_84/gamma:0
+batchnorm2d_84/moving_mean:0
+batchnorm2d_84/moving_var:0
+conv2d_79/filters:0
+batchnorm2d_85/beta:0
+batchnorm2d_85/gamma:0
+batchnorm2d_85/moving_mean:0
+batchnorm2d_85/moving_var:0
+conv2d_80/filters:0
+batchnorm2d_86/beta:0
+batchnorm2d_86/gamma:0
+batchnorm2d_86/moving_mean:0
+batchnorm2d_86/moving_var:0
+conv_yolo_2/filters:0
+batchnorm2d_87/beta:0
+batchnorm2d_87/gamma:0
+batchnorm2d_87/moving_mean:0
+batchnorm2d_87/moving_var:0
+conv2d_81/filters:0
+batchnorm2d_88/beta:0
+batchnorm2d_88/gamma:0
+batchnorm2d_88/moving_mean:0
+batchnorm2d_88/moving_var:0
+conv2d_82/filters:0
+batchnorm2d_89/beta:0
+batchnorm2d_89/gamma:0
+batchnorm2d_89/moving_mean:0
+batchnorm2d_89/moving_var:0
+conv2d_83/filters:0
+batchnorm2d_90/beta:0
+batchnorm2d_90/gamma:0
+batchnorm2d_90/moving_mean:0
+batchnorm2d_90/moving_var:0
+conv2d_84/filters:0
+batchnorm2d_91/beta:0
+batchnorm2d_91/gamma:0
+batchnorm2d_91/moving_mean:0
+batchnorm2d_91/moving_var:0
+conv2d_85/filters:0
+batchnorm2d_92/beta:0
+batchnorm2d_92/gamma:0
+batchnorm2d_92/moving_mean:0
+batchnorm2d_92/moving_var:0
+conv_route_1/filters:0
+batchnorm2d_93/beta:0
+batchnorm2d_93/gamma:0
+batchnorm2d_93/moving_mean:0
+batchnorm2d_93/moving_var:0
+conv2d_86/filters:0
+conv2d_86/biases:0
+conv_route_2/filters:0
+batchnorm2d_94/beta:0
+batchnorm2d_94/gamma:0
+batchnorm2d_94/moving_mean:0
+batchnorm2d_94/moving_var:0
+conv2d_87/filters:0
+batchnorm2d_95/beta:0
+batchnorm2d_95/gamma:0
+batchnorm2d_95/moving_mean:0
+batchnorm2d_95/moving_var:0
+conv2d_88/filters:0
+batchnorm2d_96/beta:0
+batchnorm2d_96/gamma:0
+batchnorm2d_96/moving_mean:0
+batchnorm2d_96/moving_var:0
+conv2d_89/filters:0
+batchnorm2d_97/beta:0
+batchnorm2d_97/gamma:0
+batchnorm2d_97/moving_mean:0
+batchnorm2d_97/moving_var:0
+conv2d_90/filters:0
+batchnorm2d_98/beta:0
+batchnorm2d_98/gamma:0
+batchnorm2d_98/moving_mean:0
+batchnorm2d_98/moving_var:0
+conv2d_91/filters:0
+batchnorm2d_99/beta:0
+batchnorm2d_99/gamma:0
+batchnorm2d_99/moving_mean:0
+batchnorm2d_99/moving_var:0
+conv_route_3/filters:0
+batchnorm2d_100/beta:0
+batchnorm2d_100/gamma:0
+batchnorm2d_100/moving_mean:0
+batchnorm2d_100/moving_var:0
+conv2d_92/filters:0
+conv2d_92/biases:0
+conv_route_4/filters:0
+batchnorm2d_101/beta:0
+batchnorm2d_101/gamma:0
+batchnorm2d_101/moving_mean:0
+batchnorm2d_101/moving_var:0
+conv2d_93/filters:0
+batchnorm2d_102/beta:0
+batchnorm2d_102/gamma:0
+batchnorm2d_102/moving_mean:0
+batchnorm2d_102/moving_var:0
+conv2d_94/filters:0
+batchnorm2d_103/beta:0
+batchnorm2d_103/gamma:0
+batchnorm2d_103/moving_mean:0
+batchnorm2d_103/moving_var:0
+conv2d_95/filters:0
+batchnorm2d_104/beta:0
+batchnorm2d_104/gamma:0
+batchnorm2d_104/moving_mean:0
+batchnorm2d_104/moving_var:0
+conv2d_96/filters:0
+batchnorm2d_105/beta:0
+batchnorm2d_105/gamma:0
+batchnorm2d_105/moving_mean:0
+batchnorm2d_105/moving_var:0
+conv2d_97/filters:0
+batchnorm2d_106/beta:0
+batchnorm2d_106/gamma:0
+batchnorm2d_106/moving_mean:0
+batchnorm2d_106/moving_var:0
+conv2d_98/filters:0
+batchnorm2d_107/beta:0
+batchnorm2d_107/gamma:0
+batchnorm2d_107/moving_mean:0
+batchnorm2d_107/moving_var:0
+conv2d_99/filters:0
+conv2d_99/biases:0
\ No newline at end of file
diff --git a/examples/model_zoo/model/yolov4_weights3_config.txt b/examples/model_zoo/model/yolov4_weights3_config.txt
new file mode 100644
index 000000000..5f31bb51d
--- /dev/null
+++ b/examples/model_zoo/model/yolov4_weights3_config.txt
@@ -0,0 +1,541 @@
+layer_with_weights-0/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-1/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-1/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-1/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-1/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-2/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-3/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-3/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-3/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-3/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-11/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-13/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-13/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-13/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-13/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-4/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-5/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-5/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-5/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-5/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-6/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-7/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-7/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-7/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-7/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-8/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-9/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-9/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-9/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-9/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-10/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-12/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-12/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-12/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-12/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-14/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-15/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-15/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-15/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-15/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-16/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-17/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-17/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-17/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-17/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-29/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-31/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-31/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-31/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-31/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-18/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-19/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-19/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-19/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-19/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-20/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-21/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-21/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-21/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-21/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-22/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-23/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-23/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-23/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-23/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-24/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-25/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-25/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-25/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-25/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-26/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-27/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-27/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-27/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-27/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-28/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-30/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-30/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-30/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-30/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-32/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-33/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-33/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-33/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-33/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-34/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-35/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-35/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-35/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-35/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-71/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-73/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-73/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-73/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-73/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-36/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-37/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-37/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-37/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-37/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-38/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-39/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-39/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-39/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-39/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-40/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-41/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-41/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-41/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-41/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-42/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-43/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-43/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-43/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-43/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-44/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-45/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-45/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-45/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-45/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-46/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-47/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-47/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-47/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-47/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-48/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-49/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-49/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-49/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-49/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-50/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-51/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-51/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-51/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-51/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-52/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-53/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-53/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-53/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-53/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-54/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-55/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-55/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-55/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-55/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-56/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-57/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-57/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-57/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-57/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-58/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-59/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-59/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-59/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-59/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-60/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-61/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-61/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-61/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-61/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-62/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-63/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-63/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-63/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-63/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-64/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-65/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-65/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-65/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-65/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-66/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-67/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-67/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-67/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-67/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-68/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-69/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-69/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-69/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-69/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-70/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-72/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-72/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-72/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-72/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-74/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-75/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-75/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-75/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-75/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-76/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-77/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-77/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-77/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-77/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-113/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-115/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-115/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-115/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-115/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-78/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-79/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-79/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-79/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-79/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-80/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-81/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-81/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-81/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-81/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-82/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-83/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-83/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-83/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-83/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-84/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-85/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-85/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-85/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-85/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-86/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-87/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-87/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-87/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-87/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-88/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-89/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-89/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-89/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-89/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-90/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-91/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-91/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-91/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-91/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-92/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-93/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-93/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-93/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-93/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-94/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-95/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-95/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-95/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-95/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-96/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-97/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-97/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-97/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-97/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-98/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-99/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-99/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-99/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-99/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-100/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-101/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-101/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-101/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-101/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-102/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-103/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-103/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-103/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-103/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-104/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-105/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-105/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-105/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-105/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-106/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-107/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-107/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-107/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-107/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-108/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-109/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-109/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-109/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-109/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-110/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-111/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-111/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-111/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-111/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-112/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-114/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-114/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-114/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-114/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-116/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-117/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-117/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-117/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-117/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-118/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-119/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-119/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-119/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-119/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-139/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-141/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-141/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-141/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-141/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-120/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-121/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-121/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-121/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-121/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-122/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-123/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-123/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-123/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-123/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-124/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-125/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-125/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-125/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-125/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-126/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-127/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-127/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-127/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-127/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-128/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-129/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-129/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-129/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-129/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-130/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-131/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-131/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-131/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-131/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-132/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-133/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-133/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-133/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-133/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-134/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-135/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-135/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-135/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-135/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-136/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-137/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-137/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-137/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-137/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-138/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-140/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-140/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-140/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-140/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-142/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-143/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-143/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-143/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-143/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-144/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-145/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-145/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-145/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-145/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-146/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-147/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-147/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-147/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-147/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-148/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-149/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-149/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-149/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-149/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-150/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-151/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-151/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-151/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-151/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-152/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-153/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-153/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-153/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-153/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-154/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-155/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-155/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-155/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-155/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-156/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-158/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-158/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-158/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-158/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-157/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-159/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-159/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-159/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-159/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-160/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-161/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-161/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-161/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-161/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-162/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-163/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-163/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-163/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-163/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-164/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-165/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-165/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-165/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-165/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-166/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-167/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-167/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-167/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-167/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-168/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-169/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-169/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-169/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-169/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-170/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-172/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-172/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-172/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-172/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-171/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-173/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-173/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-173/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-173/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-174/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-175/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-175/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-175/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-175/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-176/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-177/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-177/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-177/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-177/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-178/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-179/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-179/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-179/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-179/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-180/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-181/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-181/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-181/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-181/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-182/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-183/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-183/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-183/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-183/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-208/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-211/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-211/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-211/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-211/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-214/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-214/bias/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-184/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-185/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-185/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-185/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-185/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-186/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-187/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-187/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-187/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-187/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-188/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-189/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-189/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-189/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-189/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-190/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-191/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-191/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-191/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-191/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-192/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-193/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-193/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-193/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-193/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-194/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-195/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-195/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-195/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-195/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-209/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-212/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-212/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-212/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-212/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-215/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-215/bias/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-196/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-197/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-197/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-197/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-197/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-198/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-199/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-199/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-199/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-199/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-200/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-201/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-201/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-201/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-201/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-202/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-203/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-203/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-203/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-203/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-204/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-205/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-205/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-205/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-205/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-206/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-207/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-207/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-207/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-207/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-210/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-213/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-213/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-213/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-213/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-216/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-216/bias/.ATTRIBUTES/VARIABLE_VALUE
\ No newline at end of file
diff --git a/examples/model_zoo/model/yolov4_weights_config.txt b/examples/model_zoo/model/yolov4_weights_config.txt
new file mode 100644
index 000000000..2c28be036
--- /dev/null
+++ b/examples/model_zoo/model/yolov4_weights_config.txt
@@ -0,0 +1,541 @@
+layer_with_weights-0/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-1/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-1/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-1/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-1/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-2/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-3/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-3/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-3/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-3/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-11/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-4/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-13/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-13/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-13/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-13/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-5/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-5/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-5/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-5/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-6/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-7/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-7/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-7/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-7/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-8/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-9/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-9/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-9/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-9/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-10/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-12/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-12/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-12/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-12/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-14/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-15/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-15/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-15/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-15/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-16/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-17/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-17/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-17/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-17/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-29/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-18/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-31/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-31/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-31/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-31/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-19/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-19/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-19/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-19/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-20/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-21/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-21/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-21/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-21/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-22/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-23/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-23/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-23/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-23/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-24/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-25/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-25/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-25/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-25/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-26/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-27/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-27/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-27/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-27/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-28/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-30/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-30/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-30/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-30/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-32/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-33/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-33/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-33/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-33/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-34/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-35/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-35/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-35/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-35/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-71/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-36/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-73/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-73/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-73/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-73/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-37/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-37/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-37/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-37/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-38/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-39/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-39/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-39/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-39/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-40/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-41/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-41/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-41/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-41/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-42/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-43/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-43/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-43/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-43/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-44/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-45/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-45/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-45/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-45/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-46/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-47/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-47/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-47/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-47/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-48/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-49/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-49/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-49/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-49/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-50/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-51/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-51/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-51/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-51/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-52/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-53/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-53/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-53/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-53/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-54/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-55/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-55/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-55/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-55/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-56/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-57/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-57/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-57/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-57/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-58/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-59/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-59/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-59/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-59/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-60/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-61/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-61/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-61/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-61/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-62/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-63/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-63/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-63/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-63/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-64/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-65/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-65/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-65/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-65/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-66/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-67/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-67/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-67/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-67/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-68/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-69/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-69/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-69/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-69/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-70/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-72/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-72/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-72/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-72/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-74/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-75/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-75/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-75/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-75/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-171/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-173/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-173/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-173/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-173/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-76/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-77/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-77/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-77/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-77/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-113/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-78/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-115/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-115/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-115/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-115/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-79/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-79/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-79/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-79/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-80/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-81/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-81/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-81/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-81/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-82/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-83/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-83/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-83/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-83/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-84/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-85/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-85/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-85/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-85/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-86/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-87/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-87/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-87/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-87/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-88/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-89/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-89/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-89/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-89/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-90/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-91/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-91/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-91/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-91/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-92/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-93/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-93/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-93/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-93/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-94/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-95/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-95/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-95/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-95/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-96/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-97/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-97/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-97/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-97/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-98/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-99/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-99/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-99/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-99/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-100/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-101/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-101/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-101/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-101/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-102/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-103/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-103/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-103/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-103/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-104/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-105/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-105/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-105/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-105/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-106/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-107/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-107/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-107/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-107/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-108/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-109/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-109/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-109/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-109/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-110/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-111/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-111/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-111/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-111/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-112/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-114/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-114/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-114/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-114/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-116/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-117/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-117/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-117/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-117/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-157/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-159/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-159/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-159/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-159/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-118/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-119/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-119/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-119/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-119/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-139/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-120/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-141/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-141/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-141/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-141/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-121/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-121/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-121/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-121/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-122/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-123/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-123/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-123/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-123/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-124/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-125/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-125/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-125/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-125/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-126/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-127/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-127/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-127/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-127/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-128/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-129/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-129/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-129/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-129/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-130/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-131/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-131/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-131/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-131/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-132/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-133/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-133/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-133/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-133/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-134/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-135/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-135/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-135/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-135/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-136/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-137/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-137/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-137/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-137/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-138/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-140/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-140/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-140/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-140/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-142/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-143/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-143/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-143/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-143/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-144/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-145/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-145/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-145/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-145/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-146/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-147/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-147/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-147/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-147/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-148/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-149/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-149/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-149/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-149/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-150/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-151/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-151/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-151/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-151/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-152/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-153/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-153/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-153/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-153/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-154/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-155/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-155/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-155/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-155/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-156/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-158/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-158/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-158/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-158/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-160/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-161/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-161/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-161/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-161/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-162/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-163/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-163/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-163/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-163/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-164/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-165/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-165/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-165/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-165/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-166/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-167/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-167/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-167/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-167/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-168/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-169/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-169/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-169/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-169/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-170/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-172/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-172/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-172/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-172/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-174/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-175/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-175/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-175/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-175/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-176/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-177/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-177/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-177/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-177/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-178/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-179/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-179/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-179/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-179/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-180/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-181/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-181/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-181/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-181/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-182/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-183/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-183/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-183/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-183/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-208/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-211/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-211/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-211/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-211/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-184/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-214/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-214/bias/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-185/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-185/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-185/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-185/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-186/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-187/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-187/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-187/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-187/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-188/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-189/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-189/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-189/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-189/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-190/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-191/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-191/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-191/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-191/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-192/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-193/beta/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-193/gamma/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-193/moving_mean/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-193/moving_variance/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-194/kernel/.ATTRIBUTES/VARIABLE_VALUE
+layer_with_weights-195/beta/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-195/gamma/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-195/moving_mean/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-195/moving_variance/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-209/kernel/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-212/beta/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-212/gamma/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-212/moving_mean/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-212/moving_variance/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-196/kernel/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-215/kernel/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-215/bias/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-197/beta/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-197/gamma/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-197/moving_mean/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-197/moving_variance/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-198/kernel/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-199/beta/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-199/gamma/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-199/moving_mean/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-199/moving_variance/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-200/kernel/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-201/beta/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-201/gamma/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-201/moving_mean/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-201/moving_variance/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-202/kernel/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-203/beta/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-203/gamma/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-203/moving_mean/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-203/moving_variance/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-204/kernel/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-205/beta/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-205/gamma/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-205/moving_mean/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-205/moving_variance/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-206/kernel/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-207/beta/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-207/gamma/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-207/moving_mean/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-207/moving_variance/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-210/kernel/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-213/beta/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-213/gamma/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-213/moving_mean/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-213/moving_variance/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-216/kernel/.ATTRIBUTES/VARIABLE_VALUE +layer_with_weights-216/bias/.ATTRIBUTES/VARIABLE_VALUE \ No newline at end of file diff --git a/examples/model_zoo/pretrained_resnet50.py b/examples/model_zoo/pretrained_resnet50.py new file mode 100644 index 000000000..cac33eb1d --- /dev/null +++ b/examples/model_zoo/pretrained_resnet50.py @@ -0,0 +1,32 @@ +#! 
/usr/bin/python
+# -*- coding: utf-8 -*-
+"""
+ResNet50 for ImageNet using TL models
+
+"""
+
+import time
+import numpy as np
+import tensorlayer as tl
+from examples.model_zoo.imagenet_classes import class_names
+from examples.model_zoo.resnet import ResNet50
+
+tl.logging.set_verbosity(tl.logging.DEBUG)
+
+# get the whole model with pretrained weights
+resnet = ResNet50(pretrained=True)
+resnet.set_eval()
+
+# convert RGB to BGR and subtract the ImageNet channel means
+img1 = tl.vis.read_image('data/tiger.jpeg')
+img1 = tl.prepro.imresize(img1, (224, 224))[:, :, ::-1]
+img1 = img1 - np.array([103.939, 116.779, 123.68]).reshape((1, 1, 3))
+
+img1 = img1.astype(np.float32)[np.newaxis, ...]
+
+start_time = time.time()
+output = resnet(img1)
+prob = tl.ops.softmax(output)[0].numpy()
+print(" Inference time : %.5fs" % (time.time() - start_time))
+preds = (np.argsort(prob)[::-1])[0:5]
+for p in preds:
+    print(class_names[p], prob[p])
diff --git a/examples/model_zoo/pretrained_vgg16.py b/examples/model_zoo/pretrained_vgg16.py
new file mode 100644
index 000000000..9bf4264ed
--- /dev/null
+++ b/examples/model_zoo/pretrained_vgg16.py
@@ -0,0 +1,29 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+"""VGG-16 for ImageNet using TL models."""
+
+import time
+
+import numpy as np
+import tensorflow as tf
+
+import tensorlayer as tl
+from examples.model_zoo.imagenet_classes import class_names
+from examples.model_zoo.vgg import vgg16
+
+tl.logging.set_verbosity(tl.logging.DEBUG)
+
+# get the whole model with pretrained weights
+vgg = vgg16(pretrained=True)
+vgg.set_eval()
+
+img = tl.vis.read_image('data/tiger.jpeg')
+img = tl.prepro.imresize(img, (224, 224)).astype(np.float32) / 255
+
+start_time = time.time()
+output = vgg(img)
+probs = tf.nn.softmax(output)[0].numpy()
+print(" Inference time : %.5fs" % (time.time() - start_time))
+preds = (np.argsort(probs)[::-1])[0:5]
+for p in preds:
+    print(class_names[p], probs[p])
diff --git a/examples/model_zoo/pretrained_yolov4.py b/examples/model_zoo/pretrained_yolov4.py
new file mode 100644
index 000000000..c8d390886
--- /dev/null
+++ b/examples/model_zoo/pretrained_yolov4.py
@@ -0,0 +1,28 @@
+import numpy as np
+import cv2
+from PIL import Image
+from examples.model_zoo.common import yolo4_input_processing, yolo4_output_processing, \
+    result_to_json, read_class_names, draw_boxes_and_labels_to_image_with_json
+from examples.model_zoo.yolo import YOLOv4
+import tensorlayer as tl
+
+tl.logging.set_verbosity(tl.logging.DEBUG)
+
+INPUT_SIZE = 416
+image_path = './data/kite.jpg'
+
+class_names = read_class_names('./model/coco.names')
+original_image = cv2.imread(image_path)
+image = cv2.cvtColor(np.array(original_image), cv2.COLOR_BGR2RGB)
+
+model = YOLOv4(NUM_CLASS=80, pretrained=True)
+model.set_eval()
+
+batch_data = yolo4_input_processing(original_image)
+feature_maps = model(batch_data)
+pred_bbox = yolo4_output_processing(feature_maps)
+json_result = result_to_json(image, pred_bbox)
+
+image = draw_boxes_and_labels_to_image_with_json(image, json_result, class_names)
+image = Image.fromarray(image.astype(np.uint8))
+image.show()
\ No newline at end of file
diff --git a/examples/model_zoo/resnet.py b/examples/model_zoo/resnet.py
new file mode 100644
index 000000000..c57bef9de
--- /dev/null
+++ b/examples/model_zoo/resnet.py
@@ -0,0 +1,225 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+"""ResNet for ImageNet.
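+
+The 50-layer variant below is assembled from ``conv_block`` modules
+(projection shortcut) and ``identity_block`` modules, followed by global
+average pooling and a 1000-way dense classifier.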
+
+# Reference:
+- [Deep Residual Learning for Image Recognition](
+    https://arxiv.org/abs/1512.03385) (CVPR 2016 Best Paper Award)
+
+"""
+
+import os
+
+import tensorlayer as tl
+
+from tensorlayer import logging
+from tensorlayer.files import (assign_weights, maybe_download_and_extract)
+from tensorlayer.layers import (BatchNorm, Conv2d, Dense, Elementwise, GlobalMeanPool2d, Input, MaxPool2d)
+from tensorlayer.layers import Module, SequentialLayer
+
+__all__ = [
+    'ResNet50',
+]
+
+block_names = ['2a', '2b', '2c', '3a', '3b', '3c', '3d', '4a', '4b', '4c', '4d', '4e', '4f', '5a', '5b', '5c'
+              ] + ['avg_pool', 'fc1000']
+block_filters = [[64, 64, 256], [128, 128, 512], [256, 256, 1024], [512, 512, 2048]]
+in_channels_conv = [64, 256, 512, 1024]
+in_channels_identity = [256, 512, 1024, 2048]
+henorm = tl.initializers.he_normal()
+
+class identity_block(Module):
+    """The identity block, which has no conv layer at the shortcut.
+
+    Parameters
+    ----------
+    kernel_size : int
+        The kernel size of the middle conv layer in the main path.
+    n_filters : list of int
+        The numbers of filters for the 3 conv layers in the main path.
+    stage : int
+        Current stage label.
+    block : str
+        Current block label.
+
+    The ``forward`` method takes the output tensor of the previous layer
+    and returns the output tensor of this block.
+
+    """
+    def __init__(self, kernel_size, n_filters, stage, block):
+        super(identity_block, self).__init__()
+        filters1, filters2, filters3 = n_filters
+        _in_channels = in_channels_identity[stage-2]
+        conv_name_base = 'res' + str(stage) + block + '_branch'
+        bn_name_base = 'bn' + str(stage) + block + '_branch'
+
+        self.conv1 = Conv2d(filters1, (1, 1), W_init=henorm, name=conv_name_base + '2a', in_channels=_in_channels)
+        self.bn1 = BatchNorm(name=bn_name_base + '2a', act='relu', num_features=filters1)
+
+        ks = (kernel_size, kernel_size)
+        self.conv2 = Conv2d(filters2, ks, padding='SAME', W_init=henorm, name=conv_name_base + '2b', in_channels=filters1)
+        self.bn2 = BatchNorm(name=bn_name_base + '2b', act='relu', num_features=filters2)
+
+        self.conv3 = Conv2d(filters3, (1, 1), W_init=henorm, name=conv_name_base + '2c', in_channels=filters2)
+        self.bn3 = BatchNorm(name=bn_name_base + '2c', num_features=filters3)
+
+        self.add = Elementwise(tl.add, act='relu')
+
+    def forward(self, inputs):
+        output = self.conv1(inputs)
+        output = self.bn1(output)
+        output = self.conv2(output)
+        output = self.bn2(output)
+        output = self.conv3(output)
+        output = self.bn3(output)
+        result = self.add([output, inputs])
+        return result
+
+
+class conv_block(Module):
+    def __init__(self, kernel_size, n_filters, stage, block, strides=(2, 2)):
+        super(conv_block, self).__init__()
+        filters1, filters2, filters3 = n_filters
+        _in_channels = in_channels_conv[stage-2]
+        conv_name_base = 'res' + str(stage) + block + '_branch'
+        bn_name_base = 'bn' + str(stage) + block + '_branch'
+        self.conv1 = Conv2d(filters1, (1, 1), strides=strides, W_init=henorm, name=conv_name_base + '2a', in_channels=_in_channels)
+        self.bn1 = BatchNorm(name=bn_name_base + '2a', act='relu', num_features=filters1)
+
+        ks = (kernel_size, kernel_size)
+        self.conv2 = Conv2d(filters2, ks, padding='SAME', W_init=henorm, name=conv_name_base + '2b', in_channels=filters1)
+        self.bn2 = BatchNorm(name=bn_name_base + '2b', act='relu', num_features=filters2)
+
+        self.conv3 = Conv2d(filters3, (1, 1), W_init=henorm, name=conv_name_base + '2c', in_channels=filters2)
+        self.bn3 = BatchNorm(name=bn_name_base + '2c', num_features=filters3)
+
+        self.shortcut_conv = Conv2d(filters3, (1, 1), strides=strides, W_init=henorm, name=conv_name_base + '1', in_channels=_in_channels)
+        self.shortcut_bn = BatchNorm(name=bn_name_base + '1', num_features=filters3)
+
+        self.add = Elementwise(tl.add, act='relu')
+
+    def forward(self, inputs):
+        output = self.conv1(inputs)
+        output = self.bn1(output)
+        output = self.conv2(output)
+        output = self.bn2(output)
+        output = self.conv3(output)
+        output = self.bn3(output)
+
+        shortcut = self.shortcut_conv(inputs)
+        shortcut = self.shortcut_bn(shortcut)
+
+        result = self.add([output, shortcut])
+        return result
+
+
+class ResNet50_model(Module):
+    def __init__(self, end_with='fc1000', n_classes=1000):
+        super(ResNet50_model, self).__init__()
+        self.end_with = end_with
+        self.n_classes = n_classes
+        self.conv1 = Conv2d(64, (7, 7), in_channels=3, strides=(2, 2), padding='SAME', W_init=henorm, name='conv1')
+        self.bn_conv1 = BatchNorm(name='bn_conv1', act="relu", num_features=64)
+        self.max_pool1 = MaxPool2d((3, 3), strides=(2, 2), name='max_pool1')
+        self.res_layer = self.make_layer()
+
+    def forward(self, inputs):
+        z = self.conv1(inputs)
+        z = self.bn_conv1(z)
+        z = self.max_pool1(z)
+        z = self.res_layer(z)
+        return z
+
+    def make_layer(self):
+        layer_list = []
+        for i, block_name in enumerate(block_names):
+            if len(block_name) == 2:
+                stage = int(block_name[0])
+                block = block_name[1]
+                if block == 'a':
+                    strides = (1, 1) if stage == 2 else (2, 2)
+                    layer_list.append(conv_block(3, block_filters[stage - 2], stage=stage, block=block, strides=strides))
+                else:
+                    layer_list.append(identity_block(3, block_filters[stage - 2], stage=stage, block=block))
+            elif block_name == 'avg_pool':
+                layer_list.append(GlobalMeanPool2d(name='avg_pool'))
+            elif block_name == 'fc1000':
+                layer_list.append(Dense(self.n_classes, name='fc1000', in_channels=2048))
+
+            if block_name == self.end_with:
+                break
+        return SequentialLayer(layer_list)
+
+
+def ResNet50(pretrained=False, end_with='fc1000', n_classes=1000):
+    """Pre-trained ResNet50 model. Input shape [?, 224, 224, 3].
+    To use the pretrained model, the input should be in BGR format with the
+    ImageNet mean [103.939, 116.779, 123.68] subtracted.
+
+    Parameters
+    ----------
+    pretrained : boolean
+        Whether to load pretrained weights. Default False.
+    end_with : str
+        The end point of the model [2a, 2b, 2c ... 5b, 5c, avg_pool, fc1000].
+        Default ``fc1000`` i.e. the whole model.
+    n_classes : int
+        Number of classes in final prediction.
+
+    Examples
+    ---------
+    Classify ImageNet classes, see `tutorial_models_resnet50.py`
+    TODO Modify the usage example according to the model storage location
+    >>> # get the whole model with pretrained weights
+    >>> resnet = tl.models.ResNet50(pretrained=True)
+    >>> # use for inferencing
+    >>> output = resnet(img1, is_train=False)
+    >>> prob = tf.nn.softmax(output)[0].numpy()
+
+    Extract the features before the fc layer
+    >>> resnet = tl.models.ResNet50(pretrained=True, end_with='5c')
+    >>> output = resnet(img1, is_train=False)
+
+    Returns
+    -------
+    ResNet50 model.
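+
+    A minimal input-preprocessing sketch for the pretrained weights (an
+    illustration only, assuming ``img1`` is an RGB uint8 array, as in
+    ``pretrained_resnet50.py``):
+
+    >>> x = img1[:, :, ::-1] - np.array([103.939, 116.779, 123.68]).reshape((1, 1, 3))
+    >>> x = x.astype(np.float32)[np.newaxis, ...]
+    >>> prob = tl.ops.softmax(resnet(x))[0]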
+
+    """
+
+    network = ResNet50_model(end_with=end_with, n_classes=n_classes)
+
+    if pretrained:
+        restore_params(network)
+
+    return network
+
+
+def restore_params(network, path='models'):
+    logging.info("Restore pre-trained parameters")
+    maybe_download_and_extract(
+        'resnet50_weights_tf_dim_ordering_tf_kernels.h5',
+        path,
+        'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/',
+    )
+    try:
+        import h5py
+    except ImportError:
+        raise ImportError('h5py is required to load the pretrained ResNet50 weights')
+
+    f = h5py.File(os.path.join(path, 'resnet50_weights_tf_dim_ordering_tf_kernels.h5'), 'r')
+
+    for layer in network.all_layers:
+        if len(layer.all_weights) == 0:
+            continue
+        w_names = list(f[layer.name])
+        params = [f[layer.name][n][:] for n in w_names]
+        # if 'bn' in layer.name:
+        #     params = [x.reshape(1, 1, 1, -1) for x in params]
+        assign_weights(params, layer)
+        del params
+
+    f.close()
diff --git a/examples/model_zoo/vgg.py b/examples/model_zoo/vgg.py
new file mode 100644
index 000000000..779635d3c
--- /dev/null
+++ b/examples/model_zoo/vgg.py
@@ -0,0 +1,347 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+"""
+VGG for ImageNet.
+
+Introduction
+----------------
+VGG is a convolutional neural network model proposed by K. Simonyan and A. Zisserman
+from the University of Oxford in the paper "Very Deep Convolutional Networks for
+Large-Scale Image Recognition". The model achieves 92.7% top-5 test accuracy on ImageNet,
+which is a dataset of over 14 million images belonging to 1000 classes.
+
+Download Pre-trained Model
+----------------------------
+- Model weights in this example - vgg16_weights.npz : http://www.cs.toronto.edu/~frossard/post/vgg16/
+- Model weights in this example - vgg19.npy : https://media.githubusercontent.com/media/tensorlayer/pretrained-models/master/models/
+- Caffe VGG 16 model : https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
+- Tool to convert the Caffe models to TensorFlow's : https://github.com/ethereon/caffe-tensorflow
+
+Note
+------
+- For the simplified CNN layer see "Convolutional layer (Simplified)"
+on the read-the-docs website.
+- When feeding other images to the model be sure to properly resize or crop them
+beforehand. Distorted images might end up being misclassified. One way of safely
+feeding images of multiple sizes is by doing center cropping.
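+
+A minimal center-crop sketch (an illustration only, assuming ``img`` is an
+H x W x 3 array with H, W >= 224):
+
+>>> h, w = img.shape[0], img.shape[1]
+>>> top, left = (h - 224) // 2, (w - 224) // 2
+>>> img = img[top:top + 224, left:left + 224, :]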
+ +""" + +import os + +import numpy as np + +import tensorlayer as tl +from tensorlayer import logging +from tensorlayer.files import assign_weights, maybe_download_and_extract +from tensorlayer.layers import (BatchNorm, Conv2d, Dense, Flatten, Input, SequentialLayer, MaxPool2d) +from tensorlayer.layers import Module + +__all__ = [ + 'VGG', + 'vgg16', + 'vgg19', + 'VGG16', + 'VGG19', + # 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', + # 'vgg19_bn', 'vgg19', +] + +layer_names = [ + ['conv1_1', 'conv1_2'], 'pool1', ['conv2_1', 'conv2_2'], 'pool2', + ['conv3_1', 'conv3_2', 'conv3_3', 'conv3_4'], 'pool3', ['conv4_1', 'conv4_2', 'conv4_3', 'conv4_4'], 'pool4', + ['conv5_1', 'conv5_2', 'conv5_3', 'conv5_4'], 'pool5', 'flatten', 'fc1_relu', 'fc2_relu', 'outputs' +] + +cfg = { + 'A': [[64], 'M', [128], 'M', [256, 256], 'M', [512, 512], 'M', [512, 512], 'M', 'F', 'fc1', 'fc2', 'O'], + 'B': [[64, 64], 'M', [128, 128], 'M', [256, 256], 'M', [512, 512], 'M', [512, 512], 'M', 'F', 'fc1', 'fc2', 'O'], + 'D': + [ + [64, 64], 'M', [128, 128], 'M', [256, 256, 256], 'M', [512, 512, 512], 'M', [512, 512, 512], 'M', 'F', + 'fc1', 'fc2', 'O' + ], + 'E': + [ + [64, 64], 'M', [128, 128], 'M', [256, 256, 256, 256], 'M', [512, 512, 512, 512], 'M', [512, 512, 512, 512], + 'M', 'F', 'fc1', 'fc2', 'O' + ], +} + +mapped_cfg = { + 'vgg11': 'A', + 'vgg11_bn': 'A', + 'vgg13': 'B', + 'vgg13_bn': 'B', + 'vgg16': 'D', + 'vgg16_bn': 'D', + 'vgg19': 'E', + 'vgg19_bn': 'E' +} + +model_urls = { + 'vgg16': 'http://www.cs.toronto.edu/~frossard/vgg16/', + 'vgg19': 'https://media.githubusercontent.com/media/tensorlayer/pretrained-models/master/models/' +} + +model_saved_name = {'vgg16': 'vgg16_weights.npz', 'vgg19': 'vgg19.npy'} + + +class VGG(Module): + + def __init__(self, layer_type, batch_norm=False, end_with='outputs', name=None): + super(VGG, self).__init__(name=name) + self.end_with = end_with + + config = cfg[mapped_cfg[layer_type]] + self.make_layer = make_layers(config, batch_norm, end_with) + + def forward(self, inputs): + """ + inputs : tensor + Shape [None, 224, 224, 3], value range [0, 1]. 
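+
+        The forward pass rescales the inputs back to [0, 255] and subtracts
+        the ImageNet channel means [123.68, 116.779, 103.939] before the
+        first convolution, matching the pretrained weights.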
+        """
+
+        inputs = inputs * 255 - np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape([1, 1, 1, 3])
+        out = self.make_layer(inputs)
+        return out
+
+
+def make_layers(config, batch_norm=False, end_with='outputs'):
+    layer_list = []
+    is_end = False
+    for layer_group_idx, layer_group in enumerate(config):
+        if isinstance(layer_group, list):
+            for idx, layer in enumerate(layer_group):
+                layer_name = layer_names[layer_group_idx][idx]
+                n_filter = layer
+                if idx == 0:
+                    if layer_group_idx > 0:
+                        in_channels = config[layer_group_idx - 2][-1]
+                    else:
+                        in_channels = 3
+                else:
+                    in_channels = layer_group[idx - 1]
+                layer_list.append(
+                    Conv2d(
+                        n_filter=n_filter, filter_size=(3, 3), strides=(1, 1), act=tl.ReLU, padding='SAME',
+                        in_channels=in_channels, name=layer_name
+                    )
+                )
+                if batch_norm:
+                    layer_list.append(BatchNorm(num_features=n_filter))
+                if layer_name == end_with:
+                    is_end = True
+                    break
+        else:
+            layer_name = layer_names[layer_group_idx]
+            if layer_group == 'M':
+                layer_list.append(MaxPool2d(filter_size=(2, 2), strides=(2, 2), padding='SAME', name=layer_name))
+            elif layer_group == 'O':
+                layer_list.append(Dense(n_units=1000, in_channels=4096, name=layer_name))
+            elif layer_group == 'F':
+                layer_list.append(Flatten(name='flatten'))
+            elif layer_group == 'fc1':
+                layer_list.append(Dense(n_units=4096, act=tl.ReLU, in_channels=512 * 7 * 7, name=layer_name))
+            elif layer_group == 'fc2':
+                layer_list.append(Dense(n_units=4096, act=tl.ReLU, in_channels=4096, name=layer_name))
+            if layer_name == end_with:
+                is_end = True
+        if is_end:
+            break
+    return SequentialLayer(layer_list)
+
+def restore_model(model, layer_type):
+    logging.info("Restore pre-trained weights")
+    # download weights
+    maybe_download_and_extract(model_saved_name[layer_type], 'model', model_urls[layer_type])
+    weights = []
+    if layer_type == 'vgg16':
+        npz = np.load(os.path.join('model', model_saved_name[layer_type]), allow_pickle=True)
+        # get weight list
+        for val in sorted(npz.items()):
+            logging.info("  Loading weights %s in %s" % (str(val[1].shape), val[0]))
+            weights.append(val[1])
+            if len(model.all_weights) == len(weights):
+                break
+    elif layer_type == 'vgg19':
+        npz = np.load(os.path.join('model', model_saved_name[layer_type]), allow_pickle=True, encoding='latin1').item()
+        # get weight list
+        for val in sorted(npz.items()):
+            logging.info("  Loading %s in %s" % (str(val[1][0].shape), val[0]))
+            logging.info("  Loading %s in %s" % (str(val[1][1].shape), val[0]))
+            weights.extend(val[1])
+            if len(model.all_weights) == len(weights):
+                break
+    # assign weight values
+    assign_weights(weights, model)
+    del weights
+
+def vgg16(pretrained=False, end_with='outputs', mode='dynamic', name=None):
+    """Pre-trained VGG16 model.
+
+    Parameters
+    ------------
+    pretrained : boolean
+        Whether to load pretrained weights. Default False.
+    end_with : str
+        The end point of the model. Default ``outputs`` i.e. the whole model.
+    mode : str
+        Model building mode, 'dynamic' or 'static'. Default 'dynamic'.
+    name : None or str
+        A unique layer name.
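+
+    Returns
+    -------
+    VGG16 model : an instance of :class:`VGG`; weights are downloaded
+    and restored when ``pretrained=True``.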
+
+    Examples
+    ---------
+    Classify ImageNet classes with VGG16, see `tutorial_models_vgg.py `__
+    With TensorLayer
+    TODO Modify the usage example according to the model storage location
+    >>> # get the whole model, without pre-trained VGG parameters
+    >>> vgg = tl.models.vgg16()
+    >>> # get the whole model, restore pre-trained VGG parameters
+    >>> vgg = tl.models.vgg16(pretrained=True)
+    >>> # use for inferencing
+    >>> output = vgg(img, is_train=False)
+    >>> probs = tf.nn.softmax(output)[0].numpy()
+
+    Extract features with VGG16 and train a classifier with 100 classes
+
+    >>> # get VGG without the last layer
+    >>> cnn = tl.models.vgg16(end_with='fc2_relu', mode='static').as_layer()
+    >>> # add one more layer and build a new model
+    >>> ni = Input([None, 224, 224, 3], name="inputs")
+    >>> nn = cnn(ni)
+    >>> nn = tl.layers.Dense(n_units=100, name='out')(nn)
+    >>> model = tl.models.Model(inputs=ni, outputs=nn)
+    >>> # train your own classifier (only update the last layer)
+    >>> train_params = model.get_layer('out').trainable_weights
+
+    Reuse model
+
+    >>> # in dynamic model, we can directly use the same model
+    >>> # in static model
+    >>> vgg_layer = tl.models.vgg16().as_layer()
+    >>> ni_1 = tl.layers.Input([None, 224, 224, 3])
+    >>> ni_2 = tl.layers.Input([None, 224, 224, 3])
+    >>> a_1 = vgg_layer(ni_1)
+    >>> a_2 = vgg_layer(ni_2)
+    >>> M = Model(inputs=[ni_1, ni_2], outputs=[a_1, a_2])
+
+    """
+    if mode == 'dynamic':
+        model = VGG(layer_type='vgg16', batch_norm=False, end_with=end_with, name=name)
+    elif mode == 'static':
+        raise NotImplementedError
+    else:
+        raise ValueError("No such mode %s" % mode)
+    if pretrained:
+        restore_model(model, layer_type='vgg16')
+    return model
+
+
+def vgg19(pretrained=False, end_with='outputs', mode='dynamic', name=None):
+    """Pre-trained VGG19 model.
+
+    Parameters
+    ------------
+    pretrained : boolean
+        Whether to load pretrained weights. Default False.
+    end_with : str
+        The end point of the model. Default ``outputs`` i.e. the whole model.
+    mode : str
+        Model building mode, 'dynamic' or 'static'. Default 'dynamic'.
+    name : None or str
+        A unique layer name.
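+
+    Returns
+    -------
+    VGG19 model : an instance of :class:`VGG`; weights are downloaded
+    and restored when ``pretrained=True``.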
+
+    Examples
+    ---------
+    Classify ImageNet classes with VGG19, see `tutorial_models_vgg.py `__
+    With TensorLayer
+
+    >>> # get the whole model, without pre-trained VGG parameters
+    >>> vgg = tl.models.vgg19()
+    >>> # get the whole model, restore pre-trained VGG parameters
+    >>> vgg = tl.models.vgg19(pretrained=True)
+    >>> # use for inferencing
+    >>> output = vgg(img, is_train=False)
+    >>> probs = tf.nn.softmax(output)[0].numpy()
+
+    Extract features with VGG19 and train a classifier with 100 classes
+
+    >>> # get VGG without the last layer
+    >>> cnn = tl.models.vgg19(end_with='fc2_relu', mode='static').as_layer()
+    >>> # add one more layer and build a new model
+    >>> ni = Input([None, 224, 224, 3], name="inputs")
+    >>> nn = cnn(ni)
+    >>> nn = tl.layers.Dense(n_units=100, name='out')(nn)
+    >>> model = tl.models.Model(inputs=ni, outputs=nn)
+    >>> # train your own classifier (only update the last layer)
+    >>> train_params = model.get_layer('out').trainable_weights
+
+    Reuse model
+
+    >>> # in dynamic model, we can directly use the same model
+    >>> # in static model
+    >>> vgg_layer = tl.models.vgg19().as_layer()
+    >>> ni_1 = tl.layers.Input([None, 224, 224, 3])
+    >>> ni_2 = tl.layers.Input([None, 224, 224, 3])
+    >>> a_1 = vgg_layer(ni_1)
+    >>> a_2 = vgg_layer(ni_2)
+    >>> M = Model(inputs=[ni_1, ni_2], outputs=[a_1, a_2])
+
+    """
+    if mode == 'dynamic':
+        model = VGG(layer_type='vgg19', batch_norm=False, end_with=end_with, name=name)
+    elif mode == 'static':
+        raise NotImplementedError
+    else:
+        raise ValueError("No such mode %s" % mode)
+    if pretrained:
+        restore_model(model, layer_type='vgg19')
+    return model
+
+
+VGG16 = vgg16
+VGG19 = vgg19
+
+# models without pretrained parameters
+# def vgg11(pretrained=False, end_with='outputs'):
+#     model = VGG(layer_type='vgg11', batch_norm=False, end_with=end_with)
+#     if pretrained:
+#         model.restore_weights()
+#     return model
+#
+#
+# def vgg11_bn(pretrained=False, end_with='outputs'):
+#     model = VGG(layer_type='vgg11_bn', batch_norm=True, end_with=end_with)
+#     if pretrained:
+#         model.restore_weights()
+#     return model
+#
+#
+# def vgg13(pretrained=False, end_with='outputs'):
+#     model = VGG(layer_type='vgg13', batch_norm=False, end_with=end_with)
+#     if pretrained:
+#         model.restore_weights()
+#     return model
+#
+#
+# def vgg13_bn(pretrained=False, end_with='outputs'):
+#     model = VGG(layer_type='vgg13_bn', batch_norm=True, end_with=end_with)
+#     if pretrained:
+#         model.restore_weights()
+#     return model
+#
+#
+# def vgg16_bn(pretrained=False, end_with='outputs'):
+#     model = VGG(layer_type='vgg16_bn', batch_norm=True, end_with=end_with)
+#     if pretrained:
+#         model.restore_weights()
+#     return model
+#
+#
+# def vgg19_bn(pretrained=False, end_with='outputs'):
+#     model = VGG(layer_type='vgg19_bn', batch_norm=True, end_with=end_with)
+#     if pretrained:
+#         model.restore_weights()
+#     return model
diff --git a/examples/model_zoo/yolo.py b/examples/model_zoo/yolo.py
new file mode 100644
index 000000000..d3784b2bd
--- /dev/null
+++ b/examples/model_zoo/yolo.py
@@ -0,0 +1,376 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+"""YOLOv4 for MS-COCO.
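+
+The network follows the reference implementation: a CSPDarknet53 backbone
+with an SPP block, a PANet-style feature-aggregation neck, and three
+detection heads for small, medium and large boxes.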
+
+# Reference:
+- [tensorflow-yolov4-tflite](
+    https://github.com/hunglc007/tensorflow-yolov4-tflite)
+
+"""
+
+import numpy as np
+import tensorlayer as tl
+from tensorlayer.layers.activation import Mish
+from tensorlayer.layers import Conv2d, MaxPool2d, BatchNorm2d, ZeroPad2d, UpSampling2d, Concat, Elementwise
+from tensorlayer.layers import Module, SequentialLayer
+from tensorlayer import logging
+
+INPUT_SIZE = 416
+weights_url = {'link': 'https://pan.baidu.com/s/1MC1dmEwpxsdgHO1MZ8fYRQ', 'password': 'idsz'}
+
+
+class Convolutional(Module):
+    """
+    Convolution block: Conv2d, optionally followed by BatchNorm2d and an activation.
+    Since it only stacks existing layers there is nothing to build, so
+    self._built is set to True.
+    """
+    def __init__(self, filters_shape, downsample=False, activate=True, bn=True, activate_type='leaky', name=None):
+        super(Convolutional, self).__init__()
+        self.act = activate
+        self.act_type = activate_type
+        self.downsample = downsample
+        self.bn = bn
+        self._built = True
+        if downsample:
+            padding = 'VALID'
+            strides = 2
+        else:
+            strides = 1
+            padding = 'SAME'
+
+        if bn:
+            b_init = None
+        else:
+            b_init = tl.initializers.constant(value=0.0)
+
+        self.zeropad = ZeroPad2d(((1, 0), (1, 0)))
+        self.conv = Conv2d(n_filter=filters_shape[-1], in_channels=filters_shape[2], filter_size=(filters_shape[0], filters_shape[1]),
+                           strides=(strides, strides), padding=padding, b_init=b_init, name=name)
+
+        if bn:
+            if activate:
+                if activate_type == 'leaky':
+                    self.batchnorm2d = BatchNorm2d(act='leaky_relu0.1', num_features=filters_shape[-1])
+                elif activate_type == 'mish':
+                    self.batchnorm2d = BatchNorm2d(act=Mish, num_features=filters_shape[-1])
+            else:
+                self.batchnorm2d = BatchNorm2d(act=None, num_features=filters_shape[-1])
+
+    def forward(self, input):
+        if self.downsample:
+            input = self.zeropad(input)
+
+        output = self.conv(input)
+
+        if self.bn:
+            output = self.batchnorm2d(output)
+        return output
+
+class residual_block(Module):
+    def __init__(self, input_channel, filter_num1, filter_num2, activate_type='leaky'):
+        super(residual_block, self).__init__()
+        self.conv1 = Convolutional(filters_shape=(1, 1, input_channel, filter_num1), activate_type=activate_type)
+        self.conv2 = Convolutional(filters_shape=(3, 3, filter_num1, filter_num2), activate_type=activate_type)
+        self.add = Elementwise(tl.add)
+
+    def forward(self, inputs):
+        output = self.conv1(inputs)
+        output = self.conv2(output)
+        output = self.add([inputs, output])
+        return output
+
+def residual_block_num(num, input_channel, filter_num1, filter_num2, activate_type='leaky'):
+    # stack `num` residual blocks into one sequential layer
+    residual_list = []
+    for i in range(num):
+        residual_list.append(residual_block(input_channel, filter_num1, filter_num2, activate_type=activate_type))
+    return SequentialLayer(residual_list)
+
+class cspdarknet53(Module):
+    def __init__(self):
+        super(cspdarknet53, self).__init__()
+        self._built = True
+        self.conv1_1 = Convolutional((3, 3, 3, 32), activate_type='mish')
+        self.conv1_2 = Convolutional((3, 3, 32, 64), downsample=True, activate_type='mish')
+        self.conv1_3 = Convolutional((1, 1, 64, 64), activate_type='mish', name='conv_rote_block_1')
+        self.conv1_4 = Convolutional((1, 1, 64, 64), activate_type='mish')
+        self.residual_1 = residual_block_num(1, 64, 32, 64, activate_type="mish")
+
+        self.conv2_1 = Convolutional((1, 1, 64, 64), activate_type='mish')
+        self.concat = Concat()
+        self.conv2_2 = Convolutional((1, 1, 128, 64), activate_type='mish')
+        self.conv2_3 = Convolutional((3, 3, 64, 128), downsample=True, activate_type='mish')
+        self.conv2_4 = Convolutional((1, 1, 128, 64),
activate_type='mish', name='conv_rote_block_2') + self.conv2_5 = Convolutional((1, 1, 128, 64), activate_type='mish') + self.residual_2 = residual_block_num(2, 64, 64, 64, activate_type='mish') + + self.conv3_1 = Convolutional((1, 1, 64, 64), activate_type='mish') + self.conv3_2 = Convolutional((1, 1, 128, 128), activate_type='mish') + self.conv3_3 = Convolutional((3, 3, 128, 256), downsample=True, activate_type='mish') + self.conv3_4 = Convolutional((1, 1, 256, 128), activate_type='mish', name='conv_rote_block_3') + self.conv3_5 = Convolutional((1, 1, 256, 128), activate_type='mish') + self.residual_3 = residual_block_num(8, 128, 128, 128, activate_type="mish") + + self.conv4_1 = Convolutional((1, 1, 128, 128), activate_type='mish') + self.conv4_2 = Convolutional((1, 1, 256, 256), activate_type='mish') + self.conv4_3 = Convolutional((3, 3, 256, 512), downsample=True, activate_type='mish') + self.conv4_4 = Convolutional((1, 1, 512, 256), activate_type='mish', name='conv_rote_block_4') + self.conv4_5 = Convolutional((1, 1, 512, 256), activate_type='mish') + self.residual_4 = residual_block_num(8, 256, 256, 256, activate_type="mish") + + self.conv5_1 = Convolutional((1, 1, 256, 256), activate_type='mish') + self.conv5_2 = Convolutional((1, 1, 512, 512), activate_type='mish') + self.conv5_3 = Convolutional((3, 3, 512, 1024), downsample=True, activate_type='mish') + self.conv5_4 = Convolutional((1, 1, 1024, 512), activate_type='mish', name='conv_rote_block_5') + self.conv5_5 = Convolutional((1, 1, 1024, 512), activate_type='mish') + self.residual_5 = residual_block_num(4, 512, 512, 512, activate_type="mish") + + + self.conv6_1 = Convolutional((1, 1, 512, 512), activate_type='mish') + self.conv6_2 = Convolutional((1, 1, 1024, 1024), activate_type='mish') + self.conv6_3 = Convolutional((1, 1, 1024, 512)) + self.conv6_4 = Convolutional((3, 3, 512, 1024)) + self.conv6_5 = Convolutional((1, 1, 1024, 512)) + + self.maxpool1 = MaxPool2d(filter_size=(13, 13), strides=(1, 1)) + self.maxpool2 = MaxPool2d(filter_size=(9, 9), strides=(1, 1)) + self.maxpool3 = MaxPool2d(filter_size=(5, 5), strides=(1, 1)) + + self.conv7_1 = Convolutional((1, 1, 2048, 512)) + self.conv7_2 = Convolutional((3, 3, 512, 1024)) + self.conv7_3 = Convolutional((1, 1, 1024, 512)) + + def forward(self, input_data): + input_data = self.conv1_1(input_data) + input_data = self.conv1_2(input_data) + route = input_data + route = self.conv1_3(route) + input_data = self.conv1_4(input_data) + input_data = self.residual_1(input_data) + + input_data = self.conv2_1(input_data) + input_data = self.concat([input_data, route]) + input_data = self.conv2_2(input_data) + input_data = self.conv2_3(input_data) + route = input_data + route = self.conv2_4(route) + input_data = self.conv2_5(input_data) + input_data = self.residual_2(input_data) + + input_data = self.conv3_1(input_data) + input_data = self.concat([input_data, route]) + input_data = self.conv3_2(input_data) + input_data = self.conv3_3(input_data) + route = input_data + route = self.conv3_4(route) + input_data = self.conv3_5(input_data) + input_data = self.residual_3(input_data) + + input_data = self.conv4_1(input_data) + input_data = self.concat([input_data, route]) + input_data = self.conv4_2(input_data) + route_1 = input_data + input_data = self.conv4_3(input_data) + route = input_data + route = self.conv4_4(route) + input_data = self.conv4_5(input_data) + input_data = self.residual_4(input_data) + + input_data = self.conv5_1(input_data) + input_data = self.concat([input_data, route]) + 
input_data = self.conv5_2(input_data) + route_2 = input_data + input_data = self.conv5_3(input_data) + route = input_data + route = self.conv5_4(route) + input_data = self.conv5_5(input_data) + input_data = self.residual_5(input_data) + + input_data = self.conv6_1(input_data) + input_data = self.concat([input_data, route]) + + input_data = self.conv6_2(input_data) + input_data = self.conv6_3(input_data) + input_data = self.conv6_4(input_data) + input_data = self.conv6_5(input_data) + + maxpool1 = self.maxpool1(input_data) + maxpool2 = self.maxpool2(input_data) + maxpool3 = self.maxpool3(input_data) + input_data = self.concat([maxpool1, maxpool2, maxpool3, input_data]) + + input_data = self.conv7_1(input_data) + input_data = self.conv7_2(input_data) + input_data = self.conv7_3(input_data) + + return route_1, route_2, input_data + + +class YOLOv4_model(Module): + def __init__(self, NUM_CLASS): + super(YOLOv4_model, self).__init__() + self.cspdarnnet = cspdarknet53() + + self.conv1_1 = Convolutional((1, 1, 512, 256)) + self.upsamle = UpSampling2d(scale=2) + self.conv1_2 = Convolutional((1, 1, 512, 256), name='conv_yolo_1') + self.concat = Concat() + + self.conv2_1 = Convolutional((1, 1, 512, 256)) + self.conv2_2 = Convolutional((3, 3, 256, 512)) + self.conv2_3 = Convolutional((1, 1, 512, 256)) + self.conv2_4 = Convolutional((3, 3, 256, 512)) + self.conv2_5 = Convolutional((1, 1, 512, 256)) + + self.conv3_1 = Convolutional((1, 1, 256, 128)) + self.conv3_2 = Convolutional((1, 1, 256, 128), name='conv_yolo_2') + + self.conv4_1 = Convolutional((1, 1, 256, 128)) + self.conv4_2 = Convolutional((3, 3, 128, 256)) + self.conv4_3 = Convolutional((1, 1, 256, 128)) + self.conv4_4 = Convolutional((3, 3, 128, 256)) + self.conv4_5 = Convolutional((1, 1, 256, 128)) + + self.conv5_1 = Convolutional((3, 3, 128, 256), name='conv_route_1') + self.conv5_2 = Convolutional((1, 1, 256, 3 * (NUM_CLASS + 5)), activate=False, bn=False) + + self.conv6_1 = Convolutional((3, 3, 128, 256), downsample=True, name='conv_route_2') + self.conv6_2 = Convolutional((1, 1, 512, 256)) + self.conv6_3 = Convolutional((3, 3, 256, 512)) + self.conv6_4 = Convolutional((1, 1, 512, 256)) + self.conv6_5 = Convolutional((3, 3, 256, 512)) + self.conv6_6 = Convolutional((1, 1, 512, 256)) + + self.conv7_1 = Convolutional((3, 3, 256, 512), name='conv_route_3') + self.conv7_2 = Convolutional((1, 1, 512, 3 * (NUM_CLASS + 5)), activate=False, bn=False) + self.conv7_3 = Convolutional((3, 3, 256, 512), downsample=True, name='conv_route_4') + + self.conv8_1 = Convolutional((1, 1, 1024, 512)) + self.conv8_2 = Convolutional((3, 3, 512, 1024)) + self.conv8_3 = Convolutional((1, 1, 1024, 512)) + self.conv8_4 = Convolutional((3, 3, 512, 1024)) + self.conv8_5 = Convolutional((1, 1, 1024, 512)) + + self.conv9_1 = Convolutional((3, 3, 512, 1024)) + self.conv9_2 = Convolutional((1, 1, 1024, 3 * (NUM_CLASS + 5)), activate=False, bn=False) + + def forward(self, inputs): + route_1, route_2, conv = self.cspdarnnet(inputs) + + route = conv + conv = self.conv1_1(conv) + conv = self.upsamle(conv) + route_2 = self.conv1_2(route_2) + conv = self.concat([route_2, conv]) + + conv = self.conv2_1(conv) + conv = self.conv2_2(conv) + conv = self.conv2_3(conv) + conv = self.conv2_4(conv) + conv = self.conv2_5(conv) + + route_2 = conv + conv = self.conv3_1(conv) + conv = self.upsamle(conv) + route_1 = self.conv3_2(route_1) + conv = self.concat([route_1, conv]) + + conv = self.conv4_1(conv) + conv = self.conv4_2(conv) + conv = self.conv4_3(conv) + conv = self.conv4_4(conv) + 
conv = self.conv4_5(conv)
+
+        route_1 = conv
+        conv = self.conv5_1(conv)
+        conv_sbbox = self.conv5_2(conv)
+
+        conv = self.conv6_1(route_1)
+        conv = self.concat([conv, route_2])
+
+        conv = self.conv6_2(conv)
+        conv = self.conv6_3(conv)
+        conv = self.conv6_4(conv)
+        conv = self.conv6_5(conv)
+        conv = self.conv6_6(conv)
+
+        route_2 = conv
+        conv = self.conv7_1(conv)
+        conv_mbbox = self.conv7_2(conv)
+        conv = self.conv7_3(route_2)
+        conv = self.concat([conv, route])
+
+        conv = self.conv8_1(conv)
+        conv = self.conv8_2(conv)
+        conv = self.conv8_3(conv)
+        conv = self.conv8_4(conv)
+        conv = self.conv8_5(conv)
+
+        conv = self.conv9_1(conv)
+        conv_lbbox = self.conv9_2(conv)
+
+        return conv_sbbox, conv_mbbox, conv_lbbox
+
+def YOLOv4(NUM_CLASS, pretrained=False):
+    """Pre-trained YOLOv4 model.
+
+    Parameters
+    ------------
+    NUM_CLASS : int
+        Number of classes in final prediction.
+    pretrained : boolean
+        Whether to load pretrained weights. Default False.
+
+    Examples
+    ---------
+    Object Detection with YOLOv4, see `computer_vision.py
+    `__
+    With TensorLayer
+
+    >>> # get the whole model, without pre-trained YOLOv4 parameters
+    >>> yolov4 = tl.app.YOLOv4(NUM_CLASS=80, pretrained=False)
+    >>> # get the whole model, restore pre-trained YOLOv4 parameters
+    >>> yolov4 = tl.app.YOLOv4(NUM_CLASS=80, pretrained=True)
+    >>> # use for inferencing
+    >>> output = yolov4(img, is_train=False)
+
+    """
+
+    network = YOLOv4_model(NUM_CLASS=NUM_CLASS)
+
+    if pretrained:
+        restore_params(network, model_path='model/yolov4_model.npz')
+
+    return network
+
+
+def restore_params(network, model_path='models.npz'):
+    logging.info("Restore pre-trained weights")
+
+    try:
+        npz = np.load(model_path, allow_pickle=True)
+    except OSError:
+        print("Download the model file and place it in the model/ directory")
+        print("Weights download:", weights_url['link'], "password:", weights_url['password'])
+        raise
+
+    txt_path = 'model/yolov4_weights3_config.txt'
+    f = open(txt_path, "r")
+    line = f.readlines()
+    for i in range(len(line)):
+        network.all_weights[i].assign(npz[line[i].strip()])
+        logging.info("  Loading weights %s in %s" % (network.all_weights[i].shape, network.all_weights[i].name))
+
+def tl2_weights_to_tl3_weights(weights_2_path='model/weights_2.txt', weights_3_path='model/weights_3.txt', txt_path='model/yolov4_weights_config.txt'):
+    # print the TensorLayer 3.x names of the weights listed in the 2.x file
+    f1 = open(weights_2_path, "r")
+    f2 = open(weights_3_path, "r")
+    f3 = open(txt_path, "r")
+    line1 = f1.readlines()
+    line2 = f2.readlines()
+    line3 = f3.readlines()
+    _dicts = {}
+    for i in range(len(line1)):
+        _dicts[line1[i].strip()] = line3[i].strip()
+    for j in range(len(line2)):
+        print(_dicts[line2[j].strip()])

From 9ae91a2a04a166bb6b328bd6033ce360571d2970 Mon Sep 17 00:00:00 2001
From: Eric Lai
Date: Mon, 19 Apr 2021 11:39:18 +0800
Subject: [PATCH 06/36] update unit testing

---
 tests/files/test_utils_saveload.py            |   6 +-
 tests/layers/test_layernode.py                | 238 ---------
 tests/layers/test_layers_activation.py        | 256 +++------
 tests/layers/test_layers_convolution.py       | 491 +++---------
 tests/layers/test_layers_core_act.py          | 145 +++---
 .../test_layers_core_basedense_dropout.py     | 181 ++-----
 tests/layers/test_layers_core_nested.py       |  77 +--
 .../test_layers_deformable_convolution.py     |  51 +-
 tests/layers/test_layers_dense.py             | 288 ++----
 tests/layers/test_layers_embedding.py         |  44 +-
 tests/layers/test_layers_extend.py            |  11 +-
 tests/layers/test_layers_lambda.py            |  32 +-
 tests/layers/test_layers_merge.py             |  31 +-
tests/layers/test_layers_noise.py | 11 +- tests/layers/test_layers_normalization.py | 85 +-- tests/layers/test_layers_padding.py | 18 +- tests/layers/test_layers_pooling.py | 43 +- tests/layers/test_layers_recurrent.py | 7 +- tests/layers/test_layers_resampling.py | 19 +- tests/layers/test_layers_scale.py | 30 +- tests/layers/test_layers_shape.py | 43 +- tests/layers/test_layers_stack.py | 48 +- tests/models/test_auto_naming.py | 6 +- tests/models/test_keras_save.py | 6 +- tests/models/test_model_core.py | 6 +- tests/models/test_model_save.py | 7 +- tests/models/test_model_save_graph.py | 6 +- tests/models/test_seq2seq_model.py | 11 +- tests/models/test_seq2seq_with_attention.py | 11 +- tests/pending/test_array_ops.py | 9 +- tests/pending/test_decorators.py | 7 +- tests/pending/test_documentation.py | 4 +- tests/pending/test_layers_basic.py | 6 +- tests/pending/test_layers_flow_control.py | 6 +- tests/pending/test_layers_importer.py | 13 +- tests/pending/test_layers_normalization.py | 6 +- tests/pending/test_layers_padding.py | 6 +- .../test_layers_spatial_transformer.py | 10 +- tests/pending/test_layers_stack.py | 6 +- tests/pending/test_layers_super_resolution.py | 6 +- tests/pending/test_layers_time_distributed.py | 6 +- tests/pending/test_logging.py | 6 +- tests/pending/test_logging_hyperdash.py | 10 +- tests/pending/test_mnist_simple.py | 10 +- tests/pending/test_models.py | 8 +- tests/pending/test_optimizer_amsgrad.py | 6 +- tests/pending/test_pydocstyle.py | 6 +- tests/pending/test_reuse_mlp.py | 6 +- tests/pending/test_tf_layers.py | 6 +- tests/pending/test_timeout.py | 14 +- tests/pending/test_utils_predict.py | 7 +- tests/pending/test_yapf_format.py | 5 +- tests/performance_test/vgg/keras_test.py | 8 +- tests/performance_test/vgg/pytorch_test.py | 12 +- tests/performance_test/vgg/tf2-autograph.py | 8 +- tests/performance_test/vgg/tf2-eager.py | 8 +- tests/performance_test/vgg/tl2-autograph.py | 6 +- tests/performance_test/vgg/tl2-eager.py | 6 +- .../vgg/tl2-static-autograph.py | 6 +- .../performance_test/vgg/tl2-static-eager.py | 6 +- tests/test_activations.py | 14 +- tests/test_initializers.py | 8 +- tests/test_nlp.py | 11 +- tests/utils/__init__.py | 5 +- tests/utils/custom_layers/__init__.py | 2 +- tests/utils/custom_layers/basic_layers.py | 14 +- tests/utils/custom_layers/inception_blocks.py | 2 +- tests/utils/custom_networks/__init__.py | 2 +- tests/utils/custom_networks/inceptionv4.py | 17 +- 69 files changed, 678 insertions(+), 1834 deletions(-) delete mode 100644 tests/layers/test_layernode.py diff --git a/tests/files/test_utils_saveload.py b/tests/files/test_utils_saveload.py index ea51b0ff4..58a1d374a 100644 --- a/tests/files/test_utils_saveload.py +++ b/tests/files/test_utils_saveload.py @@ -4,15 +4,15 @@ import os import unittest +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' + import numpy as np import tensorflow as tf - import tensorlayer as tl from tensorlayer.layers import * from tensorlayer.models import * -from tests.utils import CustomTestCase -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' +from tests.utils import CustomTestCase def basic_static_model(): diff --git a/tests/layers/test_layernode.py b/tests/layers/test_layernode.py deleted file mode 100644 index 957857f9a..000000000 --- a/tests/layers/test_layernode.py +++ /dev/null @@ -1,238 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -import os -import unittest - -import numpy as np -import tensorflow as tf -from tensorflow.python.ops.rnn_cell import LSTMCell - -import tensorlayer as tl -from 
tensorlayer.layers import * -from tensorlayer.models import Model -from tests.utils import CustomTestCase - -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' - - -class LayerNode_Test(CustomTestCase): - - @classmethod - def setUpClass(cls): - pass - - @classmethod - def tearDownClass(cls): - pass - - def test_net1(self): - print('-' * 20, 'test_net1', '-' * 20) - - def get_model(input_shape): - ni = Input(input_shape) - - nii = Conv2d(32, filter_size=(3, 3), strides=(1, 1), name='conv1')(ni) - nn = Dropout(keep=0.9, name='drop1')(nii) - - conv = Conv2d(32, filter_size=(3, 3), strides=(1, 1), name='conv2') - tt = conv(nn) # conv2_node_0 - nn = conv(nn) # conv2_node_1 - - # a branch - na = Conv2d(64, filter_size=(3, 3), strides=(1, 1), name='conv3')(nn) - na = MaxPool2d(name='pool1')(na) - - # b branch - nb = MaxPool2d(name='pool2')(nn) - nb = conv(nb) # conv2_node_2 - - out = Concat(name='concat')([na, nb]) - M = Model(inputs=ni, outputs=[out, nn, nb]) - - gg = conv(nii) # this node will not be added since model fixed - - return M - - net = get_model([None, 24, 24, 3]) - - for k, v in enumerate(net._node_by_depth): - print(k, [x.name for x in v], [x.in_tensors_idxes for x in v]) - - all_node_names = [] - for k, v in enumerate(net._node_by_depth): - all_node_names.extend([x.name for x in v]) - - self.assertNotIn('conv2_node_0', all_node_names) - self.assertNotIn('conv2_node_3', all_node_names) - - self.assertEqual(len(net.all_layers), 8) - print(net.all_layers) - - data = np.random.normal(size=[2, 24, 24, 3]).astype(np.float32) - out, nn, nb = net(data, is_train=True) - - self.assertEqual(nn.shape, [2, 24, 24, 32]) - self.assertEqual(nb.shape, [2, 12, 12, 32]) - - def test_net2(self): - print('-' * 20, 'test_net2', '-' * 20) - - def get_unstack_model(input_shape): - ni = Input(input_shape) - - nn = Dropout(keep=0.9)(ni) - - a, b, c = UnStack(axis=-1)(nn) - - b = Flatten()(b) - b = Dense(10)(b) - - c = Flatten()(c) - - M = Model(inputs=ni, outputs=[a, b, c]) - return M - - net = get_unstack_model([None, 24, 24, 3]) - - for k, v in enumerate(net._node_by_depth): - print(k, [x.name for x in v], [x.in_tensors_idxes for x in v]) - - data = np.random.normal(size=[2, 24, 24, 3]).astype(np.float32) - out = net(data, is_train=True) - - self.assertEqual(len(out), 3) - - def test_word2vec(self): - print('-' * 20, 'test_word2vec', '-' * 20) - - def get_word2vec(): - vocabulary_size = 800 - batch_size = 10 - embedding_size = 60 - num_sampled = 25 - inputs = tl.layers.Input([batch_size], dtype=tf.int32) - labels = tl.layers.Input([batch_size, 1], dtype=tf.int32) - - emb_net = tl.layers.Word2vecEmbedding( - vocabulary_size=vocabulary_size, - embedding_size=embedding_size, - num_sampled=num_sampled, - activate_nce_loss=True, # nce loss is activated - nce_loss_args={}, - E_init=tl.initializers.random_uniform(minval=-1.0, maxval=1.0), - nce_W_init=tl.initializers.truncated_normal(stddev=float(1.0 / np.sqrt(embedding_size))), - nce_b_init=tl.initializers.constant(value=0.0), - name='word2vec_layer', - ) - emb, nce = emb_net([inputs, labels]) - - model = tl.models.Model(inputs=[inputs, labels], outputs=[emb, nce]) - return model - - net = get_word2vec() - - for k, v in enumerate(net._node_by_depth): - print(k, [x.name for x in v], [x.in_tensors_idxes for x in v]) - - x = tf.ones(shape=(10, ), dtype=tf.int32) - y = tf.ones(shape=(10, 1), dtype=tf.int32) - out = net([x, y], is_train=True) - - self.assertEqual(len(out), 2) - - def test_layerlist(self): - print('-' * 20, 'layerlist', '-' * 20) - - class MyModel(Model): - - 
def __init__(self): - super(MyModel, self).__init__() - self.layers = LayerList([Dense(50, in_channels=100), Dropout(0.9), Dense(10, in_channels=50)]) - - def forward(self, x): - return self.layers(x) - - net = MyModel() - self.assertEqual(net._nodes_fixed, False) - - data = np.random.normal(size=[4, 100]).astype(np.float32) - out = net(data, is_train=False) - - self.assertEqual(net._nodes_fixed, True) - self.assertEqual(net.layers._nodes_fixed, True) - self.assertEqual(net.layers[0]._nodes_fixed, True) - self.assertEqual(net.layers[1]._nodes_fixed, True) - self.assertEqual(net.layers[2]._nodes_fixed, True) - - def test_ModelLayer(self): - print('-' * 20, 'ModelLayer', '-' * 20) - - def MyModel(): - nii = Input(shape=[None, 100]) - nn = Dense(50, in_channels=100)(nii) - nn = Dropout(0.9)(nn) - nn = Dense(10)(nn) - M = Model(inputs=nii, outputs=nn) - return M - - mlayer = MyModel().as_layer() - - ni = Input(shape=[None, 100]) - nn = mlayer(ni) - nn = Dense(5)(nn) - net = Model(inputs=ni, outputs=nn) - - self.assertEqual(net._nodes_fixed, True) - - data = np.random.normal(size=[4, 100]).astype(np.float32) - out = net(data, is_train=False) - - self.assertEqual(net._nodes_fixed, True) - self.assertEqual(net.all_layers[1]._nodes_fixed, True) - self.assertEqual(net.all_layers[1].model._nodes_fixed, True) - self.assertEqual(net.all_layers[1].model.all_layers[0]._nodes_fixed, True) - - def test_STN(self): - print('-' * 20, 'test STN', '-' * 20) - - def get_model(inputs_shape): - ni = Input(inputs_shape) - - ## 1. Localisation network - # use MLP as the localisation net - nn = Flatten()(ni) - nn = Dense(n_units=20, act=tf.nn.tanh)(nn) - nn = Dropout(keep=0.8)(nn) - # you can also use CNN instead for MLP as the localisation net - - ## 2. Spatial transformer module (sampler) - stn = SpatialTransformer2dAffine(out_size=(40, 40), in_channels=20) - # s = stn((nn, ni)) - nn = stn((nn, ni)) - s = nn - - ## 3. 
Classifier - nn = Conv2d(16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME')(nn) - nn = Conv2d(16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME')(nn) - nn = Flatten()(nn) - nn = Dense(n_units=1024, act=tf.nn.relu)(nn) - nn = Dense(n_units=10, act=tf.identity)(nn) - - M = Model(inputs=ni, outputs=[nn, s]) - return M - - net = get_model([None, 40, 40, 1]) - - inputs = np.random.randn(2, 40, 40, 1).astype(np.float32) - o1, o2 = net(inputs, is_train=True) - self.assertEqual(o1.shape, (2, 10)) - self.assertEqual(o2.shape, (2, 40, 40, 1)) - - self.assertEqual(len(net._node_by_depth), 10) - - -if __name__ == '__main__': - - tl.logging.set_verbosity(tl.logging.DEBUG) - - unittest.main() diff --git a/tests/layers/test_layers_activation.py b/tests/layers/test_layers_activation.py index cb04233b3..fcbe690e6 100644 --- a/tests/layers/test_layers_activation.py +++ b/tests/layers/test_layers_activation.py @@ -4,212 +4,120 @@ import os import unittest -import numpy as np -import tensorflow as tf +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorlayer as tl -from tests.utils import CustomTestCase -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' +from tests.utils import CustomTestCase class Activation_Layer_Test(CustomTestCase): @classmethod - def setUpClass(cls): - cls.data = (10 + 10) * np.random.random(size=[10, 5]).astype(np.float32) - 10 - cls.data2 = (10 + 10) * np.random.random(size=[10, 10, 5]).astype(np.float32) - 10 + def setUpClass(self): + self.inputs = tl.layers.Input([10, 5]) @classmethod - def tearDownClass(cls): + def tearDownClass(self): pass def test_prelu_1(self): - inputs = tl.layers.Input([10, 5]) prelulayer = tl.layers.PRelu(channel_shared=True) - prelu = prelulayer(inputs) - model = tl.models.Model(inputs=inputs, outputs=prelu) - out = model(self.data, is_train=True) + class prelu_model(tl.layers.Module): + def __init__(self): + super(prelu_model, self).__init__() + self.prelu = prelulayer - print(prelulayer) + def forward(self, inputs): + return self.prelu(inputs) + net = prelu_model() - gt = np.zeros(shape=self.data.shape) - for i in range(len(gt)): - for j in range(len(gt[i])): - if self.data[i][j] >= 0: - gt[i][j] = self.data[i][j] - else: - gt[i][j] = prelulayer.alpha_var_constrained.numpy() * self.data[i][j] - - self.assertTrue(np.array_equal(out.numpy(), gt)) + self.assertTrue(tl.get_tensor_shape(net(self.inputs)), [10, 5]) def test_prelu_2(self): - inputs = tl.layers.Input([10, 5]) prelulayer = tl.layers.PRelu(in_channels=5) - prelu = prelulayer(inputs) - model = tl.models.Model(inputs=inputs, outputs=prelu) - out = model(self.data, is_train=True) + prelu = prelulayer(self.inputs) - print(prelulayer) + self.assertTrue(tl.get_tensor_shape(prelu), [10, 5]) - gt = np.zeros(shape=self.data.shape) - for i in range(len(gt)): - for j in range(len(gt[i])): - if self.data[i][j] >= 0: - gt[i][j] = self.data[i][j] - else: - gt[i][j] = prelulayer.alpha_var_constrained.numpy()[j] * self.data[i][j] + def test_prelu6_1(self): + prelu6layer = tl.layers.PRelu6(in_channels=5) + prelu6 = prelu6layer(self.inputs) - self.assertTrue(np.array_equal(out.numpy(), gt)) + self.assertTrue(tl.get_tensor_shape(prelu6), [10, 5]) - def test_prelu_3(self): - inputs = tl.layers.Input([10, 10, 5]) - prelulayer = tl.layers.PRelu(in_channels=5) - prelu = prelulayer(inputs) - model = tl.models.Model(inputs=inputs, outputs=prelu) - out = model(self.data2, is_train=True) - print(prelulayer) + def test_prelu6_2(self): + prelu6layer = tl.layers.PRelu6(channel_shared=True) - gt = np.zeros(shape=self.data2.shape) - for 
-            for k in range(len(gt[i])):
-                for j in range(len(gt[i][k])):
-                    if self.data2[i][k][j] >= 0:
-                        gt[i][k][j] = self.data2[i][k][j]
-                    else:
-                        gt[i][k][j] = prelulayer.alpha_var_constrained.numpy()[j] * self.data2[i][k][j]
+        class prelu6_model(tl.layers.Module):
+            def __init__(self):
+                super(prelu6_model, self).__init__()
+                self.prelu = prelu6layer
 
-        self.assertTrue(np.array_equal(out.numpy(), gt))
+            def forward(self, inputs):
+                return self.prelu(inputs)
 
-    def test_prelu6_1(self):
-        inputs = tl.layers.Input([10, 5])
-        prelulayer = tl.layers.PRelu6(channel_shared=True)
-        prelu = prelulayer(inputs)
-        model = tl.models.Model(inputs=inputs, outputs=prelu)
-        out = model(self.data, is_train=True)
-
-        print(prelulayer)
-
-        gt = np.zeros(shape=self.data.shape)
-        for i in range(len(gt)):
-            for j in range(len(gt[i])):
-                if self.data[i][j] >= 0 and self.data[i][j] <= 6:
-                    gt[i][j] = self.data[i][j]
-                elif self.data[i][j] > 6:
-                    gt[i][j] = 6
-                else:
-                    gt[i][j] = prelulayer.alpha_var_constrained.numpy() * self.data[i][j]
-
-        self.assertTrue(np.array_equal(out.numpy(), gt))
+        net = prelu6_model()
 
-    def test_prelu6_2(self):
-        inputs = tl.layers.Input([10, 5])
-        prelulayer = tl.layers.PRelu6(in_channels=5)
-        prelu = prelulayer(inputs)
-        model = tl.models.Model(inputs=inputs, outputs=prelu)
-        out = model(self.data, is_train=True)
-
-        print(prelulayer)
-
-        gt = np.zeros(shape=self.data.shape)
-        for i in range(len(gt)):
-            for j in range(len(gt[i])):
-                if self.data[i][j] >= 0 and self.data[i][j] <= 6:
-                    gt[i][j] = self.data[i][j]
-                elif self.data[i][j] > 6:
-                    gt[i][j] = 6
-                else:
-                    gt[i][j] = prelulayer.alpha_var_constrained.numpy()[j] * self.data[i][j]
-
-        self.assertTrue(np.array_equal(out.numpy(), gt))
-
-    def test_prelu6_3(self):
-        inputs = tl.layers.Input([10, 10, 5])
-        prelulayer = tl.layers.PRelu6(in_channels=5)
-        prelu = prelulayer(inputs)
-        model = tl.models.Model(inputs=inputs, outputs=prelu)
-        out = model(self.data2, is_train=True)
-
-        print(prelulayer)
-
-        gt = np.zeros(shape=self.data2.shape)
-        for i in range(len(gt)):
-            for k in range(len(gt[i])):
-                for j in range(len(gt[i][k])):
-                    if self.data2[i][k][j] >= 0 and self.data2[i][k][j] <= 6:
-                        gt[i][k][j] = self.data2[i][k][j]
-                    elif self.data2[i][k][j] > 6:
-                        gt[i][k][j] = 6
-                    else:
-                        gt[i][k][j] = prelulayer.alpha_var_constrained.numpy()[j] * self.data2[i][k][j]
-
-        self.assertTrue(np.array_equal(out.numpy(), gt))
+        self.assertEqual(tl.get_tensor_shape(net(self.inputs)), [10, 5])
 
     def test_ptrelu6_1(self):
-        inputs = tl.layers.Input([10, 5])
-        prelulayer = tl.layers.PTRelu6(channel_shared=True)
-        prelu = prelulayer(inputs)
-        model = tl.models.Model(inputs=inputs, outputs=prelu)
-        out = model(self.data, is_train=True)
-
-        print(prelulayer)
-
-        gt = np.zeros(shape=self.data.shape)
-        for i in range(len(gt)):
-            for j in range(len(gt[i])):
-                if self.data[i][j] >= 0 and self.data[i][j] <= 6:
-                    gt[i][j] = self.data[i][j]
-                elif self.data[i][j] > 6:
-                    gt[i][j] = 6 + prelulayer.alpha_high_constrained.numpy() * (self.data[i][j] - 6)
-                else:
-                    gt[i][j] = prelulayer.alpha_low_constrained.numpy() * self.data[i][j]
-
-        # FIXME: Figure out why this assert randomly fail in CI.
-        # self.assertTrue(np.array_equal(out.numpy(), gt))
+        ptrelu6layer = tl.layers.PTRelu6(channel_shared=True)
+        ptrelu6 = ptrelu6layer(self.inputs)
+
+        self.assertEqual(tl.get_tensor_shape(ptrelu6), [10, 5])
 
     def test_ptrelu6_2(self):
-        inputs = tl.layers.Input([10, 5])
-        prelulayer = tl.layers.PTRelu6(in_channels=5)
-        prelu = prelulayer(inputs)
-        model = tl.models.Model(inputs=inputs, outputs=prelu)
-        out = model(self.data, is_train=True)
-
-        print(prelulayer)
-
-        gt = np.zeros(shape=self.data.shape)
-        for i in range(len(gt)):
-            for j in range(len(gt[i])):
-                if self.data[i][j] >= 0 and self.data[i][j] <= 6:
-                    gt[i][j] = self.data[i][j]
-                elif self.data[i][j] > 6:
-                    gt[i][j] = 6 + prelulayer.alpha_high_constrained.numpy()[j] * (self.data[i][j] - 6)
-                else:
-                    gt[i][j] = prelulayer.alpha_low_constrained.numpy()[j] * self.data[i][j]
-
-        self.assertTrue(np.allclose(out.numpy(), gt))
-
-    def test_ptrelu6_3(self):
-        inputs = tl.layers.Input([3, 2, 5])
-        prelulayer = tl.layers.PTRelu6()
-        prelu = prelulayer(inputs)
-        model = tl.models.Model(inputs=inputs, outputs=prelu)
-        out = model(self.data2, is_train=True)
-
-        print(prelulayer)
-
-        gt = np.zeros(shape=self.data2.shape)
-        for i in range(len(gt)):
-            for k in range(len(gt[i])):
-                for j in range(len(gt[i][k])):
-                    if self.data2[i][k][j] >= 0 and self.data2[i][k][j] <= 6:
-                        gt[i][k][j] = self.data2[i][k][j]
-                    elif self.data2[i][k][j] > 6:
-                        gt[i][k][j] = 6 + prelulayer.alpha_high_constrained.numpy()[j] * (self.data2[i][k][j] - 6)
-                    else:
-                        gt[i][k][j] = prelulayer.alpha_low_constrained.numpy()[j] * self.data2[i][k][j]
-
-        self.assertTrue(np.allclose(out.numpy(), gt))
+        ptrelu6layer = tl.layers.PTRelu6(in_channels=5)
+
+        class ptrelu6_model(tl.layers.Module):
+            def __init__(self):
+                super(ptrelu6_model, self).__init__()
+                self.prelu = ptrelu6layer
+
+            def forward(self, inputs):
+                return self.prelu(inputs)
+
+        net = ptrelu6_model()
+
+        self.assertEqual(tl.get_tensor_shape(net(self.inputs)), [10, 5])
+
+    def test_lrelu(self):
+        lrelulayer = tl.layers.LeakyReLU(alpha=0.5)
+        lrelu = lrelulayer(self.inputs)
+
+        self.assertEqual(tl.get_tensor_shape(lrelu), [10, 5])
+
+    def test_lrelu6(self):
+        lrelu6layer = tl.layers.LeakyReLU6(alpha=0.5)
+        lrelu6 = lrelu6layer(self.inputs)
+
+        self.assertEqual(tl.get_tensor_shape(lrelu6), [10, 5])
+
+    def test_ltrelu6(self):
+        ltrelu6layer = tl.layers.LeakyTwiceRelu6()
+        ltrelu6 = ltrelu6layer(self.inputs)
+
+        self.assertEqual(tl.get_tensor_shape(ltrelu6), [10, 5])
+
+    def test_swish(self):
+        swishlayer = tl.layers.Swish()
+        swish = swishlayer(self.inputs)
+
+        self.assertEqual(tl.get_tensor_shape(swish), [10, 5])
+
+    def test_hardtanh(self):
+        hardtanhlayer = tl.layers.HardTanh()
+        hardtanh = hardtanhlayer(self.inputs)
+
+        self.assertEqual(tl.get_tensor_shape(hardtanh), [10, 5])
+
+    def test_mish(self):
+        mishlayer = tl.layers.Mish()
+        mish = mishlayer(self.inputs)
+
+        self.assertEqual(tl.get_tensor_shape(mish), [10, 5])
 
 
 if __name__ == '__main__':
diff --git a/tests/layers/test_layers_convolution.py b/tests/layers/test_layers_convolution.py
index 6787c592a..20fb15afc 100644
--- a/tests/layers/test_layers_convolution.py
+++ b/tests/layers/test_layers_convolution.py
@@ -4,218 +4,79 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
 import tensorlayer as tl
-from tensorlayer.layers import *
-from tensorlayer.models import *
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Convolution_1D_Test(CustomTestCase):
 
     @classmethod
-    def setUpClass(cls):
-        print("\n#################################")
-
-        cls.batch_size = 8
-        cls.inputs_shape = [cls.batch_size, 100, 1]
-        cls.input_layer = Input(cls.inputs_shape, name='input_layer')
-
-        cls.n1 = tl.layers.Conv1dLayer(shape=(5, 1, 32), stride=2)(cls.input_layer)
+    def setUpClass(self):
 
-        cls.n2 = tl.layers.Conv1d(n_filter=32, filter_size=5, stride=2)(cls.n1)
+        self.batch_size = 8
+        self.inputs_shape = [self.batch_size, 100, 1]
+        self.input_layer = tl.layers.Input(self.inputs_shape, name='input_layer')
 
-        cls.n3 = tl.layers.DeConv1dLayer(
-            shape=(5, 64, 32), outputs_shape=(cls.batch_size, 50, 64), strides=(1, 2, 1), name='deconv1dlayer'
-        )(cls.n2)
+        self.conv1dlayer1 = tl.layers.Conv1d(in_channels=1, n_filter=32, filter_size=5, stride=2)
+        self.n1 = self.conv1dlayer1(self.input_layer)
 
-        cls.n4 = tl.layers.SeparableConv1d(
-            n_filter=32, filter_size=3, strides=2, padding='SAME', act='relu', name='separable_1d'
-        )(cls.n3)
+        self.conv1dlayer2 = tl.layers.Conv1d(in_channels=32, n_filter=32, filter_size=5, stride=2)
+        self.n2 = self.conv1dlayer2(self.n1)
 
-        cls.n5 = tl.layers.SubpixelConv1d(scale=2, act=tf.nn.relu, in_channels=32, name='subpixel_1d')(cls.n4)
-
-        cls.model = Model(inputs=cls.input_layer, outputs=cls.n5)
-        print("Testing Conv1d model: \n", cls.model)
+        self.dconv1dlayer1 = tl.layers.DeConv1d(n_filter=64, in_channels=32, filter_size=5, name='deconv1dlayer')
+        self.n3 = self.dconv1dlayer1(self.n2)
 
     @classmethod
-    def tearDownClass(cls):
+    def tearDownClass(self):
        pass
-        # tf.reset_default_graph()
 
     def test_layer_n1(self):
-
-        # self.assertEqual(len(self.n1.all_layers), 2)
-        # self.assertEqual(len(self.n1.all_params), 2)
-        # self.assertEqual(self.n1.count_params(), 192)
-        self.assertEqual(len(self.n1._info[0].layer.all_weights), 2)
-        self.assertEqual(self.n1.get_shape().as_list()[1:], [50, 32])
+        self.assertEqual(len(self.conv1dlayer1.all_weights), 2)
+        self.assertEqual(tl.get_tensor_shape(self.n1), [self.batch_size, 50, 32])
 
     def test_layer_n2(self):
-
-        # self.assertEqual(len(self.n2.all_layers), 3)
-        # self.assertEqual(len(self.n2.all_params), 4)
-        # self.assertEqual(self.n2.count_params(), 5344)
-        self.assertEqual(len(self.n2._info[0].layer.all_weights), 2)
-        self.assertEqual(self.n2.get_shape().as_list()[1:], [25, 32])
+        self.assertEqual(len(self.conv1dlayer2.all_weights), 2)
+        self.assertEqual(tl.get_tensor_shape(self.n2), [self.batch_size, 25, 32])
 
     def test_layer_n3(self):
-
-        # self.assertEqual(len(self.n2.all_layers), 3)
-        # self.assertEqual(len(self.n2.all_params), 4)
-        # self.assertEqual(self.n2.count_params(), 5344)
-        self.assertEqual(len(self.n3._info[0].layer.all_weights), 2)
-        self.assertEqual(self.n3.get_shape().as_list()[1:], [50, 64])
-
-    def test_layer_n4(self):
-
-        # self.assertEqual(len(self.n2.all_layers), 3)
-        # self.assertEqual(len(self.n2.all_params), 4)
-        # self.assertEqual(self.n2.count_params(), 5344)
-        self.assertEqual(len(self.n4._info[0].layer.all_weights), 3)
-        self.assertEqual(self.n4.get_shape().as_list()[1:], [25, 32])
-
-    def test_layer_n5(self):
-
-        # self.assertEqual(len(self.n2.all_layers), 3)
-        # self.assertEqual(len(self.n2.all_params), 4)
-        # self.assertEqual(self.n2.count_params(), 5344)
-        self.assertEqual(self.n5.get_shape().as_list()[1:], [50, 16])
-
-    # def test_layer_n3(self):
-    #
-    #     self.assertEqual(len(self.n3.all_layers), 4)
-    #     self.assertEqual(len(self.n3.all_params), 7)
-    #     self.assertEqual(self.n3.count_params(), 6496)
-    #
self.assertEqual(self.n3.outputs.get_shape().as_list()[1:], [23, 32]) - - -# FIXME: TF2.0 only supports NHWC now -# class Layer_Convolution_1D_NCW_Test(CustomTestCase): -# -# @classmethod -# def setUpClass(cls): -# print("\n#################################") -# -# cls.batch_size = 8 -# cls.inputs_shape = [cls.batch_size, 1, 100] -# cls.input_layer = Input(cls.inputs_shape, name='input_layer') -# -# cls.n1 = tl.layers.Conv1dLayer( -# shape=(5, 1, 32), stride=2, data_format="NCW" -# )(cls.input_layer) -# cls.n2 = tl.layers.Conv1d( -# n_filter=32, filter_size=5, stride=2, data_format='channels_first' -# )(cls.n1) -# cls.model = Model(inputs=cls.input_layer, outputs=cls.n2) -# print("Testing Conv1d model: \n", cls.model) -# -# # cls.n3 = tl.layers.SeparableConv1d( -# # cls.n2, n_filter=32, filter_size=3, strides=1, padding='VALID', act=tf.nn.relu, name='separable_1d' -# # ) -# -# @classmethod -# def tearDownClass(cls): -# pass -# # tf.reset_default_graph() -# -# def test_layer_n1(self): -# -# # self.assertEqual(len(self.n1.all_layers), 2) -# # self.assertEqual(len(self.n1.all_params), 2) -# # self.assertEqual(self.n1.count_params(), 192) -# self.assertEqual(len(self.n1._info[0].layer.all_weights), 2) -# self.assertEqual(self.n1.get_shape().as_list()[1:], [50, 32]) -# -# def test_layer_n2(self): -# -# # self.assertEqual(len(self.n2.all_layers), 3) -# # self.assertEqual(len(self.n2.all_params), 4) -# # self.assertEqual(self.n2.count_params(), 5344) -# self.assertEqual(len(self.n2._info[0].layer.all_weights), 2) -# self.assertEqual(self.n2.get_shape().as_list()[1:], [25, 32]) -# -# # def test_layer_n3(self): -# # -# # self.assertEqual(len(self.n3.all_layers), 4) -# # self.assertEqual(len(self.n3.all_params), 7) -# # self.assertEqual(self.n3.count_params(), 6496) -# # self.assertEqual(self.n3.outputs.get_shape().as_list()[1:], [23, 32]) + self.assertEqual(len(self.dconv1dlayer1.all_weights), 2) + self.assertEqual(tl.get_tensor_shape(self.n3), [self.batch_size, 25, 64]) class Layer_Convolution_2D_Test(CustomTestCase): @classmethod - def setUpClass(cls): - print("\n#################################") - - cls.batch_size = 5 - cls.inputs_shape = [cls.batch_size, 400, 400, 3] - cls.input_layer = Input(cls.inputs_shape, name='input_layer') - - cls.n1 = tl.layers.Conv2dLayer( - act=tf.nn.relu, shape=(5, 5, 3, 32), strides=(1, 2, 2, 1), padding='SAME', - b_init=tf.constant_initializer(value=0.0), name='conv2dlayer' - )(cls.input_layer) - - cls.n2 = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act=None, name='conv2d')(cls.n1) - - cls.n3 = tl.layers.Conv2d( - n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, b_init=None, name='conv2d_no_bias' - )(cls.n2) - - cls.n4 = tl.layers.DeConv2dLayer( - shape=(5, 5, 32, 32), outputs_shape=(cls.batch_size, 100, 100, 32), strides=(1, 2, 2, 1), - name='deconv2dlayer' - )(cls.n3) + def setUpClass(self): - cls.n5 = tl.layers.DeConv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), name='DeConv2d')(cls.n4) + self.batch_size = 5 + self.inputs_shape = [self.batch_size, 400, 400, 3] + self.input_layer = tl.layers.Input(self.inputs_shape, name='input_layer') - cls.n6 = tl.layers.DepthwiseConv2d( - filter_size=(3, 3), strides=(1, 1), dilation_rate=(2, 2), act=tf.nn.relu, depth_multiplier=2, - name='depthwise' - )(cls.n5) + self.conv2dlayer1 = tl.layers.Conv2d(n_filter=32, in_channels=3, strides=(2, 2), filter_size=(5, 5), + padding='SAME', b_init=tl.initializers.truncated_normal(0.01), name='conv2dlayer' + ) + self.n1 = 
self.conv2dlayer1(self.input_layer) - cls.n7 = tl.layers.Conv2d( - n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, in_channels=64, name='conv2d2' - )(cls.n6) + self.conv2dlayer2 = tl.layers.Conv2d(n_filter=32, in_channels=32, filter_size=(3, 3), + strides=(2, 2), act=None, name='conv2d') + self.n2 = self.conv2dlayer2(self.n1) - cls.n8 = tl.layers.BinaryConv2d( - n_filter=64, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, in_channels=32, name='binaryconv2d' - )(cls.n7) + self.conv2dlayer3 = tl.layers.Conv2d(in_channels=32, n_filter=32, filter_size=(3, 3), strides=(2, 2), + act=tl.ReLU, b_init=None, name='conv2d_no_bias' + ) + self.n3 = self.conv2dlayer3(self.n2) - cls.n9 = tl.layers.SeparableConv2d( - n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='separableconv2d' - )(cls.n8) + self.dconv2dlayer = tl.layers.DeConv2d(n_filter=32, in_channels=32, filter_size=(5, 5), strides=(2, 2), + name='deconv2dlayer' + ) + self.n4 = self.dconv2dlayer(self.n3) - cls.n10 = tl.layers.GroupConv2d(n_filter=64, filter_size=(3, 3), strides=(2, 2), n_group=2, - name='group')(cls.n9) - - cls.n11 = tl.layers.DorefaConv2d( - n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='dorefaconv2d' - )(cls.n10) - - cls.n12 = tl.layers.TernaryConv2d( - n_filter=64, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='ternaryconv2d' - )(cls.n11) - - cls.n13 = tl.layers.QuanConv2d( - n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='quancnn2d' - )(cls.n12) - - cls.n14 = tl.layers.SubpixelConv2d(scale=2, act=tf.nn.relu, name='subpixelconv2d')(cls.n13) - - cls.n15 = tl.layers.QuanConv2dWithBN( - n_filter=64, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='quancnnbn2d' - )(cls.n14) - - cls.model = Model(cls.input_layer, cls.n15) - print("Testing Conv2d model: \n", cls.model) - - # cls.n12 = tl.layers.QuanConv2d(cls.n11, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='quancnn') + self.dwconv2dlayer = tl.layers.DepthwiseConv2d(in_channels=32, filter_size=(3, 3), strides=(1, 1), + dilation_rate=(2, 2), act=tl.ReLU, depth_multiplier=2,name='depthwise') + self.n5 = self.dwconv2dlayer(self.n4) @classmethod def tearDownClass(cls): @@ -223,274 +84,62 @@ def tearDownClass(cls): # tf.reset_default_graph() def test_layer_n1(self): - - # self.assertEqual(len(self.n1.all_layers), 2) - # self.assertEqual(len(self.n1.all_params), 2) - # self.assertEqual(self.n1.count_params(), 2432) - self.assertEqual(len(self.n1._info[0].layer.all_weights), 2) - self.assertEqual(self.n1.get_shape().as_list()[1:], [200, 200, 32]) + self.assertEqual(len(self.conv2dlayer1.all_weights), 2) + self.assertEqual(tl.get_tensor_shape(self.n1), [self.batch_size, 200, 200, 32]) def test_layer_n2(self): - - # self.assertEqual(len(self.n2.all_layers), 3) - # self.assertEqual(len(self.n2.all_params), 4) - # self.assertEqual(self.n2.count_params(), 11680) - self.assertEqual(len(self.n2._info[0].layer.all_weights), 2) - self.assertEqual(self.n2.get_shape().as_list()[1:], [100, 100, 32]) + self.assertEqual(len(self.conv2dlayer2.all_weights), 2) + self.assertEqual(tl.get_tensor_shape(self.n2), [self.batch_size, 100, 100, 32]) def test_layer_n3(self): - - # self.assertEqual(len(self.n3.all_layers), 4) - # self.assertEqual(len(self.n3.all_params), 5) - # self.assertEqual(self.n3.count_params(), 20896) - self.assertEqual(len(self.n3._info[0].layer.all_weights), 1) # b_init is None - 
self.assertEqual(self.n3.get_shape().as_list()[1:], [50, 50, 32]) + self.assertEqual(len(self.conv2dlayer3.all_weights), 1) # b_init is None + self.assertEqual(tl.get_tensor_shape(self.n3), [self.batch_size, 50, 50, 32]) def test_layer_n4(self): - - # self.assertEqual(len(self.n4.all_layers), 5) - # self.assertEqual(len(self.n4.all_params), 7) - # self.assertEqual(self.n4.count_params(), 46528) - self.assertEqual(len(self.n4._info[0].layer.all_weights), 2) - self.assertEqual(self.n4.get_shape().as_list()[1:], [100, 100, 32]) + self.assertEqual(len(self.dconv2dlayer.all_weights), 2) + self.assertEqual(tl.get_tensor_shape(self.n4), [self.batch_size, 100, 100, 32]) def test_layer_n5(self): - - # self.assertEqual(len(self.n5.all_layers), 6) - # self.assertEqual(len(self.n5.all_params), 9) - # self.assertEqual(self.n5.count_params(), 55776) - self.assertEqual(len(self.n5._info[0].layer.all_weights), 2) - self.assertEqual(self.n5.get_shape().as_list()[1:], [200, 200, 32]) - - def test_layer_n6(self): - - # self.assertEqual(len(self.n6.all_layers), 7) - # self.assertEqual(len(self.n6.all_params), 11) - # self.assertEqual(self.n6.count_params(), 56416) - self.assertEqual(len(self.n6._info[0].layer.all_weights), 2) - self.assertEqual(self.n6.get_shape().as_list()[1:], [200, 200, 64]) - - def test_layer_n7(self): - - # self.assertEqual(len(self.n7.all_layers), 8) - # self.assertEqual(len(self.n7.all_params), 13) - # self.assertEqual(self.n7.count_params(), 74880) - self.assertEqual(len(self.n7._info[0].layer.all_weights), 2) - self.assertEqual(self.n7.get_shape().as_list()[1:], [100, 100, 32]) - - def test_layer_n8(self): - - # self.assertEqual(len(self.n7.all_layers), 8) - # self.assertEqual(len(self.n7.all_params), 13) - # self.assertEqual(self.n7.count_params(), 74880) - self.assertEqual(len(self.n8._info[0].layer.all_weights), 2) - self.assertEqual(self.n8.get_shape().as_list()[1:], [50, 50, 64]) - - def test_layer_n9(self): - - # self.assertEqual(len(self.n7.all_layers), 8) - # self.assertEqual(len(self.n7.all_params), 13) - # self.assertEqual(self.n7.count_params(), 74880) - self.assertEqual(len(self.n9._info[0].layer.all_weights), 3) - self.assertEqual(self.n9.get_shape().as_list()[1:], [24, 24, 32]) - - def test_layer_n10(self): - # self.assertEqual(len(self.n7.all_layers), 8) - # self.assertEqual(len(self.n7.all_params), 13) - # self.assertEqual(self.n7.count_params(), 74880) - self.assertEqual(len(self.n10._info[0].layer.all_weights), 2) - self.assertEqual(self.n10.get_shape().as_list()[1:], [12, 12, 64]) - - def test_layer_n11(self): - # self.assertEqual(len(self.n7.all_layers), 8) - # self.assertEqual(len(self.n7.all_params), 13) - # self.assertEqual(self.n7.count_params(), 74880) - self.assertEqual(len(self.n11._info[0].layer.all_weights), 2) - self.assertEqual(self.n11.get_shape().as_list()[1:], [12, 12, 32]) - - def test_layer_n12(self): - # self.assertEqual(len(self.n7.all_layers), 8) - # self.assertEqual(len(self.n7.all_params), 13) - # self.assertEqual(self.n7.count_params(), 74880) - self.assertEqual(len(self.n12._info[0].layer.all_weights), 2) - self.assertEqual(self.n12.get_shape().as_list()[1:], [12, 12, 64]) - - def test_layer_n13(self): - # self.assertEqual(len(self.n7.all_layers), 8) - # self.assertEqual(len(self.n7.all_params), 13) - # self.assertEqual(self.n7.count_params(), 74880) - self.assertEqual(len(self.n13._info[0].layer.all_weights), 2) - self.assertEqual(self.n13.get_shape().as_list()[1:], [12, 12, 32]) - - def test_layer_n14(self): - 
self.assertEqual(self.n14.get_shape().as_list()[1:], [24, 24, 8]) - - def test_layer_n15(self): - self.assertEqual(len(self.n15._info[0].layer.all_weights), 5) - self.assertEqual(self.n15.get_shape().as_list()[1:], [24, 24, 64]) - - # def test_layer_n8(self): - # - # self.assertEqual(len(self.n8.all_layers), 9) - # self.assertEqual(len(self.n8.all_params), 15) - # self.assertEqual(self.n8.count_params(), 79520) - # self.assertEqual(self.n8.outputs.get_shape().as_list()[1:], [50, 50, 32]) - # - # def test_layer_n9(self): - # - # self.assertEqual(len(self.n9.all_layers), 10) - # self.assertEqual(len(self.n9.all_params), 18) - # self.assertEqual(self.n9.count_params(), 80864) - # self.assertEqual(self.n9.outputs.get_shape().as_list()[1:], [48, 48, 32]) - # - # def test_layer_n10(self): - # - # self.assertEqual(len(self.n10.all_layers), 11) - # self.assertEqual(len(self.n10.all_params), 20) - # self.assertEqual(self.n10.count_params(), 132128) - # self.assertEqual(self.n10.outputs.get_shape().as_list()[1:], [48, 48, 64]) - # - # def test_layer_n11(self): - # - # self.assertEqual(len(self.n11.all_layers), 12) - # self.assertEqual(len(self.n11.all_params), 22) - # self.assertEqual(self.n11.count_params(), 150592) - # self.assertEqual(self.n11.outputs.get_shape().as_list()[1:], [96, 96, 32]) - # - # def test_layer_n12(self): - # - # self.assertEqual(len(self.n12.all_layers), 13) - # self.assertEqual(len(self.n12.all_params), 24) - # self.assertEqual(self.n12.count_params(), 201856) - # self.assertEqual(self.n12.outputs.get_shape().as_list()[1:], [96, 96, 64]) + self.assertEqual(len(self.dwconv2dlayer.all_weights), 2) + self.assertEqual(tl.get_tensor_shape(self.n5), [self.batch_size, 100, 100, 64]) class Layer_Convolution_3D_Test(CustomTestCase): @classmethod - def setUpClass(cls): + def setUpClass(self): print("\n#################################") - cls.batch_size = 5 - cls.inputs_shape = [cls.batch_size, 20, 20, 20, 3] - cls.input_layer = Input(cls.inputs_shape, name='input_layer') + self.batch_size = 5 + self.inputs_shape = [self.batch_size, 20, 20, 20, 3] + self.input_layer = tl.layers.Input(self.inputs_shape, name='input_layer') - cls.n1 = tl.layers.Conv3dLayer(shape=(2, 2, 2, 3, 32), strides=(1, 2, 2, 2, 1))(cls.input_layer) + self.conv3dlayer1 = tl.layers.Conv3d(n_filter=32, in_channels=3, filter_size=(2, 2, 2), strides=(2, 2, 2)) + self.n1 = self.conv3dlayer1(self.input_layer) - cls.n2 = tl.layers.DeConv3dLayer( - shape=(2, 2, 2, 128, 32), outputs_shape=(cls.batch_size, 20, 20, 20, 128), strides=(1, 2, 2, 2, 1) - )(cls.n1) + self.deconv3dlayer = tl.layers.DeConv3d(n_filter=128, in_channels=32, filter_size=(2, 2, 2), strides=(2, 2, 2) + ) + self.n2 = self.deconv3dlayer(self.n1) - cls.n3 = tl.layers.Conv3d( - n_filter=64, filter_size=(3, 3, 3), strides=(3, 3, 3), act=tf.nn.relu, b_init=None, in_channels=128, - name='conv3d_no_bias' - )(cls.n2) - - cls.n4 = tl.layers.DeConv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2))(cls.n3) - - cls.model = Model(inputs=cls.input_layer, outputs=cls.n4) - print("Testing Conv3d model: \n", cls.model) + self.conv3dlayer2 = tl.layers.Conv3d(n_filter=64, in_channels=128,filter_size=(3, 3, 3), strides=(3, 3, 3), + act=tl.ReLU, b_init=None, name='conv3d_no_bias') + self.n3 = self.conv3dlayer2(self.n2) @classmethod - def tearDownClass(cls): + def tearDownClass(self): pass - # tf.reset_default_graph() def test_layer_n1(self): - - # self.assertEqual(len(self.n1.all_layers), 2) - # self.assertEqual(len(self.n1.all_params), 2) - # 
self.assertEqual(self.n1.count_params(), 800) - self.assertEqual(len(self.n1._info[0].layer.all_weights), 2) - self.assertEqual(self.n1.get_shape().as_list()[1:], [10, 10, 10, 32]) + self.assertEqual(len(self.conv3dlayer1.all_weights), 2) + self.assertEqual(tl.get_tensor_shape(self.n1), [self.batch_size, 10, 10, 10, 32]) def test_layer_n2(self): - - # self.assertEqual(len(self.n2.all_layers), 3) - # self.assertEqual(len(self.n2.all_params), 4) - # self.assertEqual(self.n2.count_params(), 33696) - self.assertEqual(len(self.n2._info[0].layer.all_weights), 2) - self.assertEqual(self.n2.get_shape().as_list()[1:], [20, 20, 20, 128]) + self.assertEqual(len(self.deconv3dlayer.all_weights), 2) + self.assertEqual(tl.get_tensor_shape(self.n2), [self.batch_size, 20, 20, 20, 128]) def test_layer_n3(self): - - # self.assertEqual(len(self.n3.all_layers), 4) - # self.assertEqual(len(self.n3.all_params), 6) - # self.assertEqual(self.n3.count_params(), 144320) - self.assertEqual(len(self.n3._info[0].layer.all_weights), 1) # b_init is None - self.assertEqual(self.n3.get_shape().as_list()[1:], [7, 7, 7, 64]) - - def test_layer_n4(self): - - # self.assertEqual(len(self.n3.all_layers), 4) - # self.assertEqual(len(self.n3.all_params), 6) - # self.assertEqual(self.n3.count_params(), 144320) - self.assertEqual(len(self.n4._info[0].layer.all_weights), 2) - self.assertEqual(self.n4.get_shape().as_list()[1:], [14, 14, 14, 32]) - - -# class Layer_DeformableConvolution_Test(CustomTestCase): -# -# @classmethod -# def setUpClass(cls): -# -# cls.batch_size = 5 -# cls.inputs_shape = [cls.batch_size, 299, 299, 3] -# cls.input_layer = Input(cls.inputs_shape, name='input_layer') -# -# offset1 = tl.layers.Conv2d( -# 18, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', name='offset1' -# )(cls.input_layer) -# cls.net1 = tl.layers.DeformableConv2d( -# offset1, 32, (3, 3), act=tf.nn.relu, name='deformable1' -# )(cls.input_layer) -# -# offset2 = tl.layers.Conv2d( -# 18, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', name='offset2' -# )(cls.net1) -# cls.net2 = tl.layers.DeformableConv2d( -# offset2, 64, (3, 3), act=tf.nn.relu, name='deformable2' -# )(cls.net1) -# -# @classmethod -# def tearDownClass(cls): -# pass -# -# def test_layer_n1(self): -# -# self.assertEqual(len(self.net1.all_layers), 2) -# self.assertEqual(len(self.net1.all_params), 2) -# self.assertEqual(self.net1.count_params(), 896) -# self.assertEqual(self.net1.outputs.get_shape().as_list()[1:], [299, 299, 32]) -# -# def test_layer_n2(self): -# -# self.assertEqual(len(self.net2.all_layers), 3) -# self.assertEqual(len(self.net2.all_params), 4) -# self.assertEqual(self.net2.count_params(), 19392) -# self.assertEqual(self.net2.outputs.get_shape().as_list()[1:], [299, 299, 64]) - - -class Exception_test(CustomTestCase): - - @classmethod - def setUpClass(cls): - print("##### begin testing exception in activation #####") - - def test_exception(cls): - - cls.batch_size = 5 - cls.inputs_shape = [cls.batch_size, 400, 400, 3] - cls.input_layer = Input(cls.inputs_shape, name='input_layer') - - try: - cls.n1 = tl.layers.Conv2dLayer( - act='activation', shape=(5, 5, 3, 32), strides=(1, 2, 2, 1), padding='SAME', - b_init=tf.constant_initializer(value=0.0), name='conv2dlayer' - )(cls.input_layer) - except Exception as e: - cls.assertIsInstance(e, Exception) - print(e) + self.assertEqual(len(self.conv3dlayer2.all_weights), 1) # b_init is None + self.assertEqual(tl.get_tensor_shape(self.n3), [self.batch_size, 7, 7, 7, 64]) if __name__ == '__main__': diff --git 
a/tests/layers/test_layers_core_act.py b/tests/layers/test_layers_core_act.py index 549a192ab..71a85d787 100644 --- a/tests/layers/test_layers_core_act.py +++ b/tests/layers/test_layers_core_act.py @@ -3,103 +3,98 @@ import os import unittest -import tensorflow as tf +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorlayer as tl -from tensorlayer.layers import * -from tensorlayer.models import * -from tests.utils import CustomTestCase -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' +from tests.utils import CustomTestCase class Layer_Convolution_2D_Test(CustomTestCase): @classmethod - def setUpClass(cls): - print("##### begin testing activation #####") - - @classmethod - def tearDownClass(cls): - pass - # tf.reset_default_graph() - - def test_layer_core_act(cls): - - cls.batch_size = 5 - cls.inputs_shape = [cls.batch_size, 400, 400, 3] - cls.input_layer = Input(cls.inputs_shape, name='input_layer') + def setUpClass(self): + self.batch_size = 5 + self.inputs_shape = [self.batch_size, 400, 400, 3] + self.input_layer = tl.layers.Input(self.inputs_shape, name='input_layer') + + self.conv2dlayer1 = tl.layers.Conv2d(n_filter=32, in_channels=3, act=tl.ReLU, filter_size=(5, 5), + strides=(2, 2), + padding='SAME', b_init=tl.initializers.constant(value=0.0), + name='conv2dlayer' + ) + self.n1 = self.conv2dlayer1(self.input_layer) + + self.conv2dlayer2 = tl.layers.Conv2d(n_filter=32, in_channels=32, filter_size=(3, 3), strides=(2, 2), + act="relu", name='conv2d') + self.n2 = self.conv2dlayer2(self.n1) + + self.conv2dlayer3 = tl.layers.Conv2d(n_filter=32, in_channels=32, filter_size=(3, 3), strides=(2, 2), + act="leaky_relu", b_init=None) + self.n3 = self.conv2dlayer3(self.n2) + + self.conv2dlayer4 = tl.layers.Conv2d(n_filter=32, in_channels=32, filter_size=(3, 3), strides=(2, 2), + act="lrelu", b_init=None) + self.n4 = self.conv2dlayer4(self.n3) + + self.conv2dlayer5 = tl.layers.Conv2d(n_filter=32, in_channels=32, filter_size=(3, 3), strides=(2, 2), + act="sigmoid") + self.n5 = self.conv2dlayer5(self.n4) + + self.conv2dlayer6 = tl.layers.Conv2d(n_filter=32, in_channels=32, filter_size=(3, 3), strides=(2, 2), + act="tanh") + self.n6 = self.conv2dlayer6(self.n5) + + self.conv2dlayer7 = tl.layers.Conv2d( + n_filter=32, filter_size=(3, 3), strides=(2, 2), act="leaky_relu0.22", in_channels=32 + ) + self.n7 = self.conv2dlayer7(self.n6) - cls.n1 = tl.layers.Conv2dLayer( - act=tf.nn.relu, shape=(5, 5, 3, 32), strides=(1, 2, 2, 1), padding='SAME', - b_init=tf.constant_initializer(value=0.0), name='conv2dlayer' - )(cls.input_layer) + self.conv2dlayer8 = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act="lrelu0.22", + in_channels=32) + self.n8 = self.conv2dlayer8(self.n7) - cls.n2 = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act="relu", name='conv2d')(cls.n1) + self.conv2dlayer9 = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act="softplus", + in_channels=32) + self.n9 = self.conv2dlayer9(self.n8) - cls.n3 = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act="leaky_relu", - b_init=None)(cls.n2) + self.conv2dlayer10 = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act="relu6", + in_channels=32) + self.n10 = self.conv2dlayer10(self.n9) - cls.n4 = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act="lrelu", b_init=None)(cls.n2) + @classmethod + def tearDownClass(self): + pass - cls.n5 = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act="sigmoid", - in_channels=32)(cls.n4) + def 
test_relu(self): + self.assertEqual(tl.get_tensor_shape(self.n1), [5, 200, 200, 32]) - cls.n6 = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act="tanh", in_channels=32)(cls.n5) + def test_relu_str(self): + self.assertEqual(tl.get_tensor_shape(self.n2), [5, 100, 100, 32]) - cls.n7 = tl.layers.Conv2d( - n_filter=32, filter_size=(3, 3), strides=(2, 2), act="leaky_relu0.22", in_channels=32 - )(cls.n6) + def test_leaky_relu_str(self): + self.assertEqual(tl.get_tensor_shape(self.n3), [5, 50, 50, 32]) - cls.n8 = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act="lrelu0.22", - in_channels=32)(cls.n7) + def test_lrelu_str(self): + self.assertEqual(tl.get_tensor_shape(self.n4), [5, 25, 25, 32]) - cls.n9 = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act="softplus", - in_channels=32)(cls.n8) + def test_sigmoid_str(self): + self.assertEqual(tl.get_tensor_shape(self.n5), [5, 13, 13, 32]) - cls.n10 = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act="relu6", in_channels=32)(cls.n9) + def test_tanh_str(self): + self.assertEqual(tl.get_tensor_shape(self.n6), [5, 7, 7, 32]) - cls.model = Model(cls.input_layer, cls.n8) + def test_leaky_relu_float_str(self): + self.assertEqual(tl.get_tensor_shape(self.n7), [5, 4, 4, 32]) + def test_lrelu_float_str(self): + self.assertEqual(tl.get_tensor_shape(self.n8), [5, 2, 2, 32]) -class Exception_test(CustomTestCase): + def test_softplus_str(self): + self.assertEqual(tl.get_tensor_shape(self.n9), [5, 1, 1, 32]) - @classmethod - def setUpClass(cls): - print("##### begin testing exception in activation #####") - - def test_exception(cls): - - cls.batch_size = 5 - cls.inputs_shape = [cls.batch_size, 400, 400, 3] - cls.input_layer = Input(cls.inputs_shape, name='input_layer') - - try: - cls.n1 = tl.layers.Conv2dLayer( - act='activation', shape=(5, 5, 3, 32), strides=(1, 2, 2, 1), padding='SAME', - b_init=tf.constant_initializer(value=0.0), name='conv2dlayer' - )(cls.input_layer) - except Exception as e: - cls.assertIsInstance(e, Exception) - print(e) - - try: - cls.n2 = tl.layers.Conv2dLayer( - act='leaky_relu0.2x', shape=(5, 5, 3, 32), strides=(1, 2, 2, 1), padding='SAME', - b_init=tf.constant_initializer(value=0.0), name='conv2dlayer' - )(cls.input_layer) - except Exception as e: - cls.assertIsInstance(e, Exception) - print(e) - - try: - cls.n3 = tl.layers.Conv2dLayer( - act='lrelu0.2x', shape=(5, 5, 3, 32), strides=(1, 2, 2, 1), padding='SAME', - b_init=tf.constant_initializer(value=0.0), name='conv2dlayer' - )(cls.input_layer) - except Exception as e: - cls.assertIsInstance(e, Exception) - print(e) + def test_relu6_str(self): + self.assertEqual(tl.get_tensor_shape(self.n10), [5, 1, 1, 32]) if __name__ == '__main__': diff --git a/tests/layers/test_layers_core_basedense_dropout.py b/tests/layers/test_layers_core_basedense_dropout.py index c3ecfebc5..a926a745b 100644 --- a/tests/layers/test_layers_core_basedense_dropout.py +++ b/tests/layers/test_layers_core_basedense_dropout.py @@ -4,168 +4,69 @@ import os import unittest -import numpy as np -import tensorflow as tf +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorlayer as tl -from tensorlayer.layers import * -from tensorlayer.models import * -from tests.utils import CustomTestCase -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' +from tests.utils import CustomTestCase class Layer_Core_Test(CustomTestCase): @classmethod - def setUpClass(cls): + def setUpClass(self): + + self.batch_size = 8 - cls.batch_size = 8 + self.inputs_shape = 
[self.batch_size, 784] + self.input = tl.layers.Input(self.inputs_shape) + self.dense1 = tl.layers.Dense(n_units=800, act=tl.ReLU, in_channels=784, name='test_dense') + self.n1 = self.dense1(self.input) - # ============== Layer ============== + self.dropout1 = tl.layers.Dropout(keep=0.8) + self.n2 = self.dropout1(self.n1) - cls.base_layer = Layer(what=None) + self.dense2 = tl.layers.Dense(n_units=10, act='relu', b_init=None, in_channels=800) + self.n3 = self.dense2(self.n2) - # ============== DenseLayer ============== + self.dense3 = tl.layers.Dense(n_units=10, act='relu', b_init=None, in_channels=10) + self.n4 = self.dense3(self.n3) - cls.inputs_shape = [None, 784] - cls.innet = Input(cls.inputs_shape) - cls.dense1 = Dense(n_units=800, act=tf.nn.relu, in_channels=784, name='test_dense')(cls.innet) - cls.dropout1 = Dropout(keep=0.8)(cls.dense1) - cls.dense2 = Dense(n_units=10, act=tf.nn.relu, b_init=None)(cls.dropout1) - cls.dense3 = Dense(n_units=10, act=tf.nn.relu, b_init=None) - cls.concat = Concat(concat_dim=-1)([cls.dense2, cls.dropout1]) + self.concat = tl.layers.Concat(concat_dim=-1)([self.n2, self.n3]) + + class get_model(tl.layers.Module): + def __init__(self): + super(get_model, self).__init__() + self.layer1 = tl.layers.Dense(n_units=800, act=tl.ReLU, in_channels=784, name='test_dense') + self.dp = tl.layers.Dropout(keep=0.8) + self.layer2 = tl.layers.Dense(n_units=10, act='relu', b_init=None, in_channels=800) + self.layer3 = tl.layers.Dense(n_units=10, act='relu', b_init=None, in_channels=10) + + def forward(self, inputs): + z = self.layer1(inputs) + z = self.dp(z) + z = self.layer2(z) + z = self.layer3(z) + return z + + self.net = get_model() - cls.model = Model(inputs=cls.innet, outputs=cls.dense2) @classmethod def tearDownClass(cls): pass - def test_net1(self): + def test_dense(self): + self.assertEqual(tl.get_tensor_shape(self.n1), [self.batch_size, 800]) - # test exceptional cases - try: - self.base_layer.build(None) - except Exception as e: - print(e) - - try: - self.base_layer.forward(None) - except Exception as e: - print(e) - - try: - self.base_layer[4] = 1 - except Exception as e: - print(e) - - try: - del self.base_layer[4] - except Exception as e: - print(e) - - try: - Layer(what=1) - except Exception as e: - print(e) - - def test_net2(self): - - # test weights - self.assertEqual(self.innet._info[0].layer.all_weights, []) - self.assertEqual(self.dropout1._info[0].layer.all_weights, []) - self.assertEqual(self.dense1._info[0].layer.all_weights[0].get_shape().as_list(), [784, 800]) - self.assertEqual(self.dense1._info[0].layer.all_weights[1].get_shape().as_list(), [ - 800, - ]) - self.assertEqual(self.dense2._info[0].layer.all_weights[0].get_shape().as_list(), [800, 10]) - self.assertEqual(len(self.dense1._info[0].layer.all_weights), 2) - self.assertEqual(len(self.dense2._info[0].layer.all_weights), 1) - - self.assertEqual(len(self.model.all_weights), 3) - - # a special case - self.model.release_memory() - - # test printing - # print(self.innet) - # print(self.dense1) - # print(self.dropout1) - # print(self.dense2) - # print(self.dense3) - - def test_special_cases(self): - try: - innet = Input([121]) - dense1 = Dense(n_units=800, act=tf.nn.relu)(innet) - except Exception as e: - print(e) - - def test_modellayer(self): - - data = np.random.normal(size=[self.batch_size, self.inputs_shape[1]]).astype(np.float32) - - origin_results_train = self.model(data, is_train=True) - origin_results_test = self.model(data, is_train=False) - - new_innet = Input(self.inputs_shape) - 
new_mlayer = ModelLayer(self.model)(new_innet) - - newmodel = Model(inputs=new_innet, outputs=new_mlayer) - - new_results_train = newmodel(data, is_train=True) - new_results_test = newmodel(data, is_train=False) - - self.assertEqual(origin_results_train.shape, new_results_train.shape) - self.assertTrue(np.array_equal(origin_results_test.shape, new_results_test.shape)) - - newmodel.release_memory() - - def test_layerlist(self): - innet = Input(self.inputs_shape) - hlayer = LayerList( - [ - ModelLayer(self.model), - LayerList([Dense(n_units=100), Dense(n_units=10)]), - Dense(n_units=5), - Dense(n_units=4) - ] - )(innet) - model = Model(inputs=innet, outputs=hlayer) - - # for w in model.all_weights: - # print(w.name) - - data = np.random.normal(size=[self.batch_size, self.inputs_shape[1]]).astype(np.float32) - pred = model(data, is_train=False) - self.assertEqual(pred.get_shape().as_list(), [self.batch_size, 4]) - - print(model) - - model.release_memory() - - def test_duplicate_names(self): - dense1 = tl.layers.Dense(n_units=10, name='test_densehh') - print(dense1) - try: - dense2 = tl.layers.Dense(n_units=10, name='test_densehh') - print(dense2) - except Exception as e: - print(e) - dense1 = tl.layers.Dense(n_units=10, name='test_densehh1') - dense2 = tl.layers.Dense(n_units=10, name='test_densehh2') - print(dense1) - print(dense2) + def test_dense_nonbias(self): + self.assertEqual(len(self.dense2.all_weights), 1) def test_dropout(self): - data_x = np.random.random([10, 784]).astype(np.float32) - pred_y_1 = self.model(data_x, is_train=True) - pred_y_2 = self.model(data_x, is_train=True) - self.assertFalse(np.allclose(pred_y_1, pred_y_2)) - pred_y_1 = self.model(data_x, is_train=False) - pred_y_2 = self.model(data_x, is_train=False) - self.assertTrue(np.allclose(pred_y_1, pred_y_2)) + self.assertEqual(len(self.dropout1.all_weights), 0) + + def test_model(self): + self.assertEqual(len(self.net.all_weights), 4) if __name__ == '__main__': diff --git a/tests/layers/test_layers_core_nested.py b/tests/layers/test_layers_core_nested.py index 1c5ef5908..167d7af3a 100644 --- a/tests/layers/test_layers_core_nested.py +++ b/tests/layers/test_layers_core_nested.py @@ -3,13 +3,12 @@ import os import unittest -import numpy as np -import tensorflow as tf +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorlayer as tl -from tests.utils import CustomTestCase +import numpy as np -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' +from tests.utils import CustomTestCase class Layer_nested(CustomTestCase): @@ -21,11 +20,10 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): pass - # tf.reset_default_graph() def test_nested_layer_with_inchannels(cls): - class MyLayer(tl.layers.Layer): + class MyLayer(tl.layers.Module): def __init__(self, name=None): super(MyLayer, self).__init__(name=name) @@ -38,10 +36,10 @@ def build(self, inputs_shape=None): def forward(self, inputs): inputs = self.input_layer(inputs) - output = tf.matmul(inputs, self.W) + output = tl.ops.matmul(inputs, self.W) return output - class model(tl.models.Model): + class model(tl.layers.Module): def __init__(self, name=None): super(model, self).__init__(name=name) @@ -50,70 +48,15 @@ def __init__(self, name=None): def forward(self, inputs): return self.layer(inputs) - input = tf.random.normal(shape=(100, 50)) + input = tl.layers.Input(shape=(100, 50)) model_dynamic = model() - model_dynamic.train() + model_dynamic.set_train() cls.assertEqual(model_dynamic(input).shape, (100, 10)) cls.assertEqual(len(model_dynamic.all_weights), 3) 
cls.assertEqual(len(model_dynamic.trainable_weights), 3) - model_dynamic.layer.input_layer.b.assign_add(tf.ones((20, ))) - cls.assertEqual(np.sum(model_dynamic.all_weights[-1].numpy() - tf.ones(20, ).numpy()), 0) - - ni = tl.layers.Input(shape=(100, 50)) - nn = MyLayer(name='mylayer1')(ni) - model_static = tl.models.Model(inputs=ni, outputs=nn) - model_static.eval() - cls.assertEqual(model_static(input).shape, (100, 10)) - cls.assertEqual(len(model_static.all_weights), 3) - cls.assertEqual(len(model_static.trainable_weights), 3) - model_static.get_layer('mylayer1').input_layer.b.assign_add(tf.ones((20, ))) - cls.assertEqual(np.sum(model_static.all_weights[-1].numpy() - tf.ones(20, ).numpy()), 0) - - def test_nested_layer_without_inchannels(cls): - - class MyLayer(tl.layers.Layer): - - def __init__(self, name=None): - super(MyLayer, self).__init__(name=name) - self.input_layer = tl.layers.Dense(n_units=20) # no need for in_channels here - self.build(None) - self._built = True - - def build(self, inputs_shape=None): - self.W = self._get_weights('weights', shape=(20, 10)) - def forward(self, inputs): - inputs = self.input_layer(inputs) - output = tf.matmul(inputs, self.W) - return output - - class model(tl.models.Model): - - def __init__(self, name=None): - super(model, self).__init__(name=name) - self.layer = MyLayer() - - def forward(self, inputs): - return self.layer(inputs) - - input = tf.random.normal(shape=(100, 50)) - model_dynamic = model() - model_dynamic.train() - cls.assertEqual(model_dynamic(input).shape, (100, 10)) - cls.assertEqual(len(model_dynamic.all_weights), 3) - cls.assertEqual(len(model_dynamic.trainable_weights), 3) - model_dynamic.layer.input_layer.b.assign_add(tf.ones((20, ))) - cls.assertEqual(np.sum(model_dynamic.all_weights[-1].numpy() - tf.ones(20, ).numpy()), 0) - - ni = tl.layers.Input(shape=(100, 50)) - nn = MyLayer(name='mylayer2')(ni) - model_static = tl.models.Model(inputs=ni, outputs=nn) - model_static.eval() - cls.assertEqual(model_static(input).shape, (100, 10)) - cls.assertEqual(len(model_static.all_weights), 3) - cls.assertEqual(len(model_static.trainable_weights), 3) - model_static.get_layer('mylayer2').input_layer.b.assign_add(tf.ones((20, ))) - cls.assertEqual(np.sum(model_static.all_weights[-1].numpy() - tf.ones(20, ).numpy()), 0) + model_dynamic.layer.input_layer.b.assign_add(tl.ops.ones((20, ))) + cls.assertEqual(np.sum(model_dynamic.all_weights[-1].numpy() - tl.ops.ones(20, ).numpy()), 0) if __name__ == '__main__': diff --git a/tests/layers/test_layers_deformable_convolution.py b/tests/layers/test_layers_deformable_convolution.py index 8c5df8e8d..a449b420d 100644 --- a/tests/layers/test_layers_deformable_convolution.py +++ b/tests/layers/test_layers_deformable_convolution.py @@ -4,53 +4,44 @@ import os import unittest -import tensorflow as tf +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorlayer as tl -from tensorlayer.layers import * -from tensorlayer.models import * from tests.utils import CustomTestCase -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' - - class Layer_Convolution_2D_Test(CustomTestCase): @classmethod - def setUpClass(cls): + def setUpClass(self): print("\n#################################") - cls.batch_size = 5 - cls.inputs_shape = [cls.batch_size, 10, 10, 16] - cls.input_layer = Input(cls.inputs_shape, name='input_layer') - - cls.offset1 = tl.layers.Conv2d(n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', - name='offset1')(cls.input_layer) - cls.deformconv1 = tl.layers.DeformableConv2d( - 
offset_layer=cls.offset1, n_filter=32, filter_size=(3, 3), act=tf.nn.relu, name='deformable1' - )(cls.input_layer) - cls.offset2 = tl.layers.Conv2d(n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', - name='offset2')(cls.deformconv1) - cls.deformconv2 = tl.layers.DeformableConv2d( - offset_layer=cls.offset2, n_filter=64, filter_size=(3, 3), act=tf.nn.relu, name='deformable2' - )(cls.deformconv1) - - cls.model = Model(cls.input_layer, cls.deformconv2) - print("Testing Deformable Conv2d model: \n", cls.model) + self.batch_size = 5 + self.inputs_shape = [self.batch_size, 10, 10, 16] + self.input_layer = tl.layers.Input(self.inputs_shape, name='input_layer') + + self.offset1 = tl.layers.Conv2d(n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', + name='offset1')(self.input_layer) + self.init_deformconv1 = tl.layers.DeformableConv2d( + offset_layer=self.offset1, n_filter=32, filter_size=(3, 3), act='relu', name='deformable1' + ) + self.deformconv1 = self.init_deformconv1(self.input_layer) + self.offset2 = tl.layers.Conv2d(n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', + name='offset2')(self.deformconv1) + self.deformconv2 = tl.layers.DeformableConv2d( + offset_layer=self.offset2, n_filter=64, filter_size=(3, 3), act='relu', name='deformable2' + )(self.deformconv1) @classmethod - def tearDownClass(cls): + def tearDownClass(self): pass def test_layer_n1(self): - self.assertEqual(len(self.deformconv1._info[0].layer.all_weights), 2) - self.assertEqual(self.deformconv1.get_shape().as_list()[1:], [10, 10, 32]) + self.assertEqual(len(self.init_deformconv1.all_weights), 2) + self.assertEqual(tl.get_tensor_shape(self.deformconv1)[1:], [10, 10, 32]) def test_layer_n2(self): - - self.assertEqual(len(self.deformconv2._info[0].layer.all_weights), 2) - self.assertEqual(self.deformconv2.get_shape().as_list()[1:], [10, 10, 64]) + self.assertEqual(tl.get_tensor_shape(self.deformconv2)[1:], [10, 10, 64]) if __name__ == '__main__': diff --git a/tests/layers/test_layers_dense.py b/tests/layers/test_layers_dense.py index b6f76c1c9..c8cc32682 100644 --- a/tests/layers/test_layers_dense.py +++ b/tests/layers/test_layers_dense.py @@ -3,41 +3,29 @@ import os import unittest -import numpy as np -import tensorflow as tf +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorlayer as tl -from tensorlayer.layers import * -from tensorlayer.models import * -from tests.utils import CustomTestCase -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' +from tests.utils import CustomTestCase +import numpy as np class Layer_BinaryDense_Test(CustomTestCase): @classmethod - def setUpClass(cls): + def setUpClass(self): print("-" * 20, "Layer_BinaryDense_Test", "-" * 20) - cls.batch_size = 4 - cls.inputs_shape = [cls.batch_size, 10] + self.batch_size = 4 + self.inputs_shape = [self.batch_size, 10] - cls.ni = Input(cls.inputs_shape, name='input_layer') - cls.layer1 = BinaryDense(n_units=5) - nn = cls.layer1(cls.ni) - cls.layer1._nodes_fixed = True - cls.M = Model(inputs=cls.ni, outputs=nn) + self.ni = tl.layers.Input(self.inputs_shape, name='input_layer') + self.layer1 = tl.layers.BinaryDense(n_units=5) - cls.layer2 = BinaryDense(n_units=5, in_channels=10) - cls.layer2._nodes_fixed = True + self.layer2 = tl.layers.BinaryDense(n_units=5, in_channels=10) - cls.inputs = tf.ones((cls.inputs_shape)) - cls.n1 = cls.layer1(cls.inputs) - cls.n2 = cls.layer2(cls.inputs) - cls.n3 = cls.M(cls.inputs, is_train=True) - - print(cls.layer1) - print(cls.layer2) + self.n1 = self.layer1(self.ni) + self.n2 = 
self.layer2(self.ni) @classmethod def tearDownClass(cls): @@ -45,57 +33,27 @@ def tearDownClass(cls): def test_layer_n1(self): print(self.n1[0]) - self.assertEqual(tf.reduce_sum(self.n1).numpy() % 1, 0.0) # should be integer + self.assertEqual(tl.ops.ReduceSum()(self.n1).numpy() % 1, 0.0) # should be integer def test_layer_n2(self): print(self.n2[0]) - self.assertEqual(tf.reduce_sum(self.n2).numpy() % 1, 0.0) # should be integer - - def test_model_n3(self): - print(self.n3[0]) - self.assertEqual(tf.reduce_sum(self.n3).numpy() % 1, 0.0) # should be integer - - def test_exception(self): - try: - layer = BinaryDense(n_units=5) - inputs = Input([4, 10, 5], name='ill_inputs') - out = layer(inputs) - self.fail('ill inputs') - except Exception as e: - print(e) - - try: - layer = BinaryDense(n_units=5, use_gemm=True) - out = layer(self.ni) - self.fail('use gemm') - except Exception as e: - print(e) + self.assertEqual(tl.ops.ReduceSum()(self.n2).numpy() % 1, 0.0) # should be integer class Layer_DorefaDense_Test(CustomTestCase): @classmethod - def setUpClass(cls): + def setUpClass(self): print("-" * 20, "Layer_DorefaDense_Test", "-" * 20) - cls.batch_size = 4 - cls.inputs_shape = [cls.batch_size, 10] - - cls.ni = Input(cls.inputs_shape, name='input_layer') - cls.layer1 = DorefaDense(n_units=5) - nn = cls.layer1(cls.ni) - cls.layer1._nodes_fixed = True - cls.M = Model(inputs=cls.ni, outputs=nn) - - cls.layer2 = DorefaDense(n_units=5, in_channels=10) - cls.layer2._nodes_fixed = True + self.batch_size = 4 + self.inputs_shape = [self.batch_size, 10] - cls.inputs = tf.ones((cls.inputs_shape)) - cls.n1 = cls.layer1(cls.inputs) - cls.n2 = cls.layer2(cls.inputs) - cls.n3 = cls.M(cls.inputs, is_train=True) + self.ni = tl.layers.Input(self.inputs_shape, name='input_layer') + self.layer1 = tl.layers.DorefaDense(n_units=5) + self.layer2 = tl.layers.DorefaDense(n_units=5, in_channels=10) - print(cls.layer1) - print(cls.layer2) + self.n1 = self.layer1(self.ni) + self.n2 = self.layer2(self.ni) @classmethod def tearDownClass(cls): @@ -103,57 +61,26 @@ def tearDownClass(cls): def test_layer_n1(self): print(self.n1[0]) - # self.assertEqual(tf.reduce_sum(self.n1).numpy() % 1, 0.0) # should be integer def test_layer_n2(self): print(self.n2[0]) - # self.assertEqual(tf.reduce_sum(self.n2).numpy() % 1, 0.0) # should be integer - - def test_model_n3(self): - print(self.n3[0]) - # self.assertEqual(tf.reduce_sum(self.n3).numpy() % 1, 0.0) # should be integer - - def test_exception(self): - try: - layer = DorefaDense(n_units=5) - inputs = Input([4, 10, 5], name='ill_inputs') - out = layer(inputs) - self.fail('ill inputs') - except Exception as e: - print(e) - - try: - layer = DorefaDense(n_units=5, use_gemm=True) - out = layer(self.ni) - self.fail('use gemm') - except Exception as e: - print(e) class Layer_DropconnectDense_Test(CustomTestCase): @classmethod - def setUpClass(cls): + def setUpClass(self): print("-" * 20, "Layer_DropconnectDense_Test", "-" * 20) - cls.batch_size = 4 - cls.inputs_shape = [cls.batch_size, 10] - - cls.ni = Input(cls.inputs_shape, name='input_layer') - cls.layer1 = DropconnectDense(n_units=5, keep=1.0) - nn = cls.layer1(cls.ni) - cls.layer1._nodes_fixed = True - cls.M = Model(inputs=cls.ni, outputs=nn) + self.batch_size = 4 + self.inputs_shape = [self.batch_size, 10] - cls.layer2 = DropconnectDense(n_units=5, in_channels=10, keep=0.01) - cls.layer2._nodes_fixed = True + self.ni = tl.layers.Input(self.inputs_shape, name='input_layer') + self.layer1 = tl.layers.DropconnectDense(n_units=5, keep=1.0) - 
cls.inputs = tf.ones((cls.inputs_shape)) - cls.n1 = cls.layer1(cls.inputs) - cls.n2 = cls.layer2(cls.inputs) - cls.n3 = cls.M(cls.inputs, is_train=True) + self.layer2 = tl.layers.DropconnectDense(n_units=5, in_channels=10, keep=0.01) - print(cls.layer1) - print(cls.layer2) + self.n1 = self.layer1(self.ni) + self.n2 = self.layer2(self.ni) @classmethod def tearDownClass(cls): @@ -163,55 +90,24 @@ def test_layer_n1(self): print(self.n1[0]) def test_layer_n2(self): - zero_rate = tf.reduce_mean(tf.cast(tf.equal(self.n2, 0.0), tf.float32)) - print(zero_rate) - self.assertGreater(zero_rate, 0.0) print(self.n2[0]) - def test_model_n3(self): - print(self.n3[0]) - - def test_exception(self): - try: - layer = DropconnectDense(n_units=5) - inputs = Input([4, 10, 5], name='ill_inputs') - out = layer(inputs) - self.fail('ill inputs') - except Exception as e: - print(e) - - try: - layer = DropconnectDense(n_units=5, keep=0.0) - self.fail('keep no elements') - except Exception as e: - self.assertIsInstance(e, ValueError) - print(e) - class Layer_QuanDense_Test(CustomTestCase): @classmethod - def setUpClass(cls): + def setUpClass(self): print("-" * 20, "Layer_QuanDense_Test", "-" * 20) - cls.batch_size = 4 - cls.inputs_shape = [cls.batch_size, 10] - - cls.ni = Input(cls.inputs_shape, name='input_layer') - cls.layer1 = QuanDense(n_units=5) - nn = cls.layer1(cls.ni) - cls.layer1._nodes_fixed = True - cls.M = Model(inputs=cls.ni, outputs=nn) + self.batch_size = 4 + self.inputs_shape = [self.batch_size, 10] - cls.layer2 = QuanDense(n_units=5, in_channels=10) - cls.layer2._nodes_fixed = True + self.ni = tl.layers.Input(self.inputs_shape, name='input_layer') + self.layer1 = tl.layers.QuanDense(n_units=5) - cls.inputs = tf.random.uniform((cls.inputs_shape)) - cls.n1 = cls.layer1(cls.inputs) - cls.n2 = cls.layer2(cls.inputs) - cls.n3 = cls.M(cls.inputs, is_train=True) + self.layer2 = tl.layers.QuanDense(n_units=5, in_channels=10) - print(cls.layer1) - print(cls.layer2) + self.n1 = self.layer1(self.ni) + self.n2 = self.layer2(self.ni) @classmethod def tearDownClass(cls): @@ -223,50 +119,21 @@ def test_layer_n1(self): def test_layer_n2(self): print(self.n2[0]) - def test_model_n3(self): - print(self.n3[0]) - - def test_exception(self): - try: - layer = QuanDense(n_units=5) - inputs = Input([4, 10, 5], name='ill_inputs') - out = layer(inputs) - self.fail('ill inputs') - except Exception as e: - print(e) - - try: - layer = QuanDense(n_units=5, use_gemm=True) - out = layer(self.ni) - self.fail('use gemm') - except Exception as e: - print(e) - class Layer_QuanDenseWithBN_Test(CustomTestCase): @classmethod - def setUpClass(cls): + def setUpClass(self): print("-" * 20, "Layer_QuanDenseWithBN_Test", "-" * 20) - cls.batch_size = 4 - cls.inputs_shape = [cls.batch_size, 10] - - cls.ni = Input(cls.inputs_shape, name='input_layer') - cls.layer1 = QuanDenseWithBN(n_units=5) - nn = cls.layer1(cls.ni) - cls.layer1._nodes_fixed = True - cls.M = Model(inputs=cls.ni, outputs=nn) + self.batch_size = 4 + self.inputs_shape = [self.batch_size, 10] - cls.layer2 = QuanDenseWithBN(n_units=5, in_channels=10) - cls.layer2._nodes_fixed = True + self.inputs = tl.initializers.TruncatedNormal()(shape=self.inputs_shape) + self.layer1 = tl.layers.QuanDenseWithBN(n_units=5) + self.layer2 = tl.layers.QuanDenseWithBN(n_units=5, in_channels=10) - cls.inputs = tf.random.uniform((cls.inputs_shape)) - cls.n1 = cls.layer1(cls.inputs) - cls.n2 = cls.layer2(cls.inputs) - cls.n3 = cls.M(cls.inputs, is_train=True) - - print(cls.layer1) - print(cls.layer2) + 
self.n1 = self.layer1(self.inputs) + self.n2 = self.layer2(self.inputs) @classmethod def tearDownClass(cls): @@ -278,50 +145,21 @@ def test_layer_n1(self): def test_layer_n2(self): print(self.n2[0]) - def test_model_n3(self): - print(self.n3[0]) - - def test_exception(self): - try: - layer = QuanDenseWithBN(n_units=5) - inputs = Input([4, 10, 5], name='ill_inputs') - out = layer(inputs) - self.fail('ill inputs') - except Exception as e: - print(e) - - try: - layer = QuanDenseWithBN(n_units=5, use_gemm=True) - out = layer(self.ni) - self.fail('use gemm') - except Exception as e: - print(e) - class Layer_TernaryDense_Test(CustomTestCase): @classmethod - def setUpClass(cls): + def setUpClass(self): print("-" * 20, "Layer_BinaryDense_Test", "-" * 20) - cls.batch_size = 4 - cls.inputs_shape = [cls.batch_size, 10] + self.batch_size = 4 + self.inputs_shape = [self.batch_size, 10] - cls.ni = Input(cls.inputs_shape, name='input_layer') - cls.layer1 = TernaryDense(n_units=5) - nn = cls.layer1(cls.ni) - cls.layer1._nodes_fixed = True - cls.M = Model(inputs=cls.ni, outputs=nn) + self.inputs = tl.layers.Input(self.inputs_shape, name='input_layer') + self.layer1 = tl.layers.TernaryDense(n_units=5) + self.layer2 = tl.layers.TernaryDense(n_units=5, in_channels=10) - cls.layer2 = TernaryDense(n_units=5, in_channels=10) - cls.layer2._nodes_fixed = True - - cls.inputs = tf.ones((cls.inputs_shape)) - cls.n1 = cls.layer1(cls.inputs) - cls.n2 = cls.layer2(cls.inputs) - cls.n3 = cls.M(cls.inputs, is_train=True) - - print(cls.layer1) - print(cls.layer2) + self.n1 = self.layer1(self.inputs) + self.n2 = self.layer2(self.inputs) @classmethod def tearDownClass(cls): @@ -335,26 +173,6 @@ def test_layer_n2(self): print(np.unique(self.n2.numpy().reshape(-1))) print(self.n2[0]) - def test_model_n3(self): - print(np.unique(self.n3.numpy().reshape(-1))) - print(self.n3[0]) - - def test_exception(self): - try: - layer = TernaryDense(n_units=5) - inputs = Input([4, 10, 5], name='ill_inputs') - out = layer(inputs) - self.fail('ill inputs') - except Exception as e: - print(e) - - try: - layer = TernaryDense(n_units=5, use_gemm=True) - out = layer(self.ni) - self.fail('use gemm') - except Exception as e: - print(e) - if __name__ == '__main__': diff --git a/tests/layers/test_layers_embedding.py b/tests/layers/test_layers_embedding.py index 4377b79a7..832e47bbe 100644 --- a/tests/layers/test_layers_embedding.py +++ b/tests/layers/test_layers_embedding.py @@ -4,13 +4,12 @@ import os import unittest -import numpy as np -import tensorflow as tf +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorlayer as tl -from tests.utils import CustomTestCase +import numpy as np -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' +from tests.utils import CustomTestCase class Layer_Embed_Test(CustomTestCase): @@ -24,37 +23,34 @@ def tearDownClass(cls): pass def test_onehot(self): - input = tl.layers.Input([32], dtype=tf.int32) + input = tl.layers.Input([32], dtype=tl.int32) onehot = tl.layers.OneHot(depth=8, on_value=1, off_value=0, axis=-1) print(onehot) tensor = tl.layers.OneHot(depth=8)(input) self.assertEqual(tensor.get_shape().as_list(), [32, 8]) - model = tl.models.Model(inputs=input, outputs=tensor) def test_embed(self): - input = tl.layers.Input([8, 100], dtype=tf.int32) + input = tl.layers.Input([8, 100], dtype=tl.int32) embed = tl.layers.Embedding(vocabulary_size=1000, embedding_size=50, name='embed') print(embed) tensor = embed(input) self.assertEqual(tensor.get_shape().as_list(), [8, 100, 50]) - model = tl.models.Model(inputs=input, 
diff --git a/tests/layers/test_layers_embedding.py b/tests/layers/test_layers_embedding.py
index 4377b79a7..832e47bbe 100644
--- a/tests/layers/test_layers_embedding.py
+++ b/tests/layers/test_layers_embedding.py
@@ -4,13 +4,12 @@
 import os
 import unittest
 
-import numpy as np
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
 import tensorlayer as tl
-from tests.utils import CustomTestCase
+import numpy as np
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Embed_Test(CustomTestCase):
@@ -24,37 +23,34 @@ def tearDownClass(cls):
         pass
 
     def test_onehot(self):
-        input = tl.layers.Input([32], dtype=tf.int32)
+        input = tl.layers.Input([32], dtype=tl.int32)
         onehot = tl.layers.OneHot(depth=8, on_value=1, off_value=0, axis=-1)
         print(onehot)
         tensor = tl.layers.OneHot(depth=8)(input)
         self.assertEqual(tensor.get_shape().as_list(), [32, 8])
-        model = tl.models.Model(inputs=input, outputs=tensor)
 
     def test_embed(self):
-        input = tl.layers.Input([8, 100], dtype=tf.int32)
+        input = tl.layers.Input([8, 100], dtype=tl.int32)
         embed = tl.layers.Embedding(vocabulary_size=1000, embedding_size=50, name='embed')
         print(embed)
         tensor = embed(input)
         self.assertEqual(tensor.get_shape().as_list(), [8, 100, 50])
-        model = tl.models.Model(inputs=input, outputs=tensor)
 
     def test_avg_embed(self):
         batch_size = 8
         length = 5
-        input = tl.layers.Input([batch_size, length], dtype=tf.int32)
+        input = tl.layers.Input([batch_size, length], dtype=tl.int32)
         avgembed = tl.layers.AverageEmbedding(vocabulary_size=1000, embedding_size=50, name='avg')
         print(avgembed)
         tensor = avgembed(input)
         # print(tensor)
         self.assertEqual(tensor.get_shape().as_list(), [batch_size, 50])
-        model = tl.models.Model(inputs=input, outputs=tensor)
 
     def test_word2vec_nce(self):
         batch_size = 8
         embedding_size = 50
-        inputs = tl.layers.Input([batch_size], dtype=tf.int32)
-        labels = tl.layers.Input([batch_size, 1], dtype=tf.int32)
+        inputs = tl.layers.Input([batch_size], dtype=tl.int32)
+        labels = tl.layers.Input([batch_size, 1], dtype=tl.int32)
         emb_net = tl.layers.Word2vecEmbedding(
             vocabulary_size=10000,
             embedding_size=embedding_size,
@@ -66,38 +62,21 @@ def test_word2vec_nce(self):
             nce_b_init=tl.initializers.constant(value=0.0),
         )
         print(emb_net)
-        try:
-            embed_tensor, embed_nce_loss = emb_net(inputs)
-        except ValueError as e:
-            print(e)
-        try:
-            embed_tensor = emb_net(inputs, use_nce_loss=False)
-            print("Not use NCE without labels")
-        except Exception as e:
-            print(e)
+        embed_tensor = emb_net([inputs, labels], use_nce_loss=False)
         embed_tensor, embed_nce_loss = emb_net([inputs, labels], use_nce_loss=True)
         embed_tensor, embed_nce_loss = emb_net([inputs, labels])
         self.assertEqual(embed_tensor.get_shape().as_list(), [batch_size, embedding_size])
 
-        outputs = tl.layers.Dense(n_units=10)(embed_tensor)
-        model = tl.models.Model(inputs=[inputs, labels], outputs=[outputs, embed_nce_loss])
-        out, nce = model(
-            [np.random.randint(0, 1, size=[batch_size]),
-             np.random.randint(0, 1, size=[batch_size, 1])], is_train=True
-        )
-        self.assertEqual(out.get_shape().as_list(), [batch_size, 10])
-        print(nce)
-
     def test_word2vec_no_nce(self):
         batch_size = 8
         embedding_size = 50
-        inputs = tl.layers.Input([batch_size], dtype=tf.int32)
+        inputs = tl.layers.Input([batch_size], dtype=tl.int32)
         emb_net = tl.layers.Word2vecEmbedding(
             vocabulary_size=10000,
             embedding_size=embedding_size,
             num_sampled=100,
-            activate_nce_loss=False,  # the nce loss is activated
+            activate_nce_loss=False,
             nce_loss_args={},
             E_init=tl.initializers.random_uniform(minval=-1.0, maxval=1.0),
             nce_W_init=tl.initializers.truncated_normal(stddev=float(1.0 / np.sqrt(embedding_size))),
@@ -111,7 +90,6 @@ def test_word2vec_no_nce(self):
         except AttributeError as e:
             print(e)
         self.assertEqual(embed_tensor.get_shape().as_list(), [batch_size, embedding_size])
-        model = tl.models.Model(inputs=inputs, outputs=embed_tensor)
 
 
 if __name__ == '__main__':
diff --git a/tests/layers/test_layers_extend.py b/tests/layers/test_layers_extend.py
index 5d4decc60..22b685618 100644
--- a/tests/layers/test_layers_extend.py
+++ b/tests/layers/test_layers_extend.py
@@ -4,12 +4,11 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Extend_Test(CustomTestCase):
@@ -26,15 +25,13 @@ def test_expand_dims(self):
         x = tl.layers.Input([8, 3])
         expandlayer = tl.layers.ExpandDims(axis=-1)
         y = expandlayer(x)
-        print(expandlayer)
-        self.assertEqual(y.get_shape().as_list(), [8, 3, 1])
+        self.assertEqual(tl.get_tensor_shape(y), [8, 3, 1])
 
     def test_tile(self):
         x = tl.layers.Input([8, 3])
         tilelayer = tl.layers.Tile(multiples=[2, 3])
         y = tilelayer(x)
-        print(tilelayer)
-        self.assertEqual(y.get_shape().as_list(), [16, 9])
+        self.assertEqual(tl.get_tensor_shape(y), [16, 9])
 
 
 if __name__ == '__main__':
diff --git a/tests/layers/test_layers_lambda.py b/tests/layers/test_layers_lambda.py
index cb487e86f..d2ab4a107 100644
--- a/tests/layers/test_layers_lambda.py
+++ b/tests/layers/test_layers_lambda.py
@@ -3,14 +3,14 @@
 import os
 import unittest
-
 import numpy as np
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Lambda_Test(CustomTestCase):
@@ -34,7 +34,7 @@ def test_lambda_keras(self):
         # in order to get trainable_variables of keras
         _ = perceptron(np.random.random([100, 5]).astype(np.float32))
 
-        class CustomizeModel(tl.models.Model):
+        class CustomizeModel(tl.layers.Module):
 
             def __init__(self):
                 super(CustomizeModel, self).__init__()
@@ -51,7 +51,7 @@ def forward(self, x):
 
         model = CustomizeModel()
         print(model.lambdalayer)
-        model.train()
+        model.set_train()
 
         for epoch in range(10):
             with tf.GradientTape() as tape:
@@ -73,7 +73,7 @@ def customize_func(x, foo=42):
             else:
                 return tf.identity(x)
 
-        class CustomizeModel(tl.models.Model):
+        class CustomizeModel(tl.layers.Module):
 
             def __init__(self):
                 super(CustomizeModel, self).__init__()
@@ -90,7 +90,7 @@ def forward(self, x, bar):
 
         model = CustomizeModel()
         print(model.lambdalayer)
-        model.train()
+        model.set_train()
 
         out, out2 = model(self.data_x, bar=-1)
         self.assertTrue(np.array_equal(out2.numpy(), tf.nn.relu(out).numpy()))
@@ -108,7 +108,7 @@ def test_lambda_func_with_weight(self):
         def customize_fn(x):
             return x + a
 
-        class CustomizeModel(tl.models.Model):
+        class CustomizeModel(tl.layers.Module):
 
             def __init__(self):
                 super(CustomizeModel, self).__init__()
@@ -122,14 +122,14 @@ def forward(self, x):
 
         model = CustomizeModel()
         print(model.lambdalayer)
-        model.train()
+        model.set_train()
 
         out = model(self.data_x)
         print(out.shape)
 
     def test_lambda_func_without_args(self):
 
-        class CustomizeModel(tl.models.Model):
+        class CustomizeModel(tl.layers.Module):
 
             def __init__(self):
                 super(CustomizeModel, self).__init__()
@@ -143,7 +143,7 @@ def forward(self, x):
 
         model = CustomizeModel()
         print(model.lambdalayer)
-        model.train()
+        model.set_train()
 
         out, out2 = model(self.data_x)
         self.assertTrue(np.array_equal(out2.numpy(), out.numpy() * 2))
@@ -153,7 +153,7 @@ def test_elementwiselambda_func_with_args(self):
         def customize_func(noise, mean, std, foo=42):
             return mean + noise * tf.exp(std * 0.5) + foo
 
-        class CustomizeModel(tl.models.Model):
+        class CustomizeModel(tl.layers.Module):
 
             def __init__(self):
                 super(CustomizeModel, self).__init__()
@@ -174,7 +174,7 @@ def forward(self, x, bar=None):
 
         model = CustomizeModel()
         print(model.lambdalayer)
-        model.train()
+        model.set_train()
 
         noise, mean, std, out = model(self.data_x)
         self.assertTrue(np.allclose(out.numpy(), customize_func(noise, mean, std, foo=1024).numpy()))
@@ -186,7 +186,7 @@ def test_elementwiselambda_func_without_args(self):
         def customize_func(noise, mean, std):
             return mean + noise * tf.exp(std * 0.5)
 
-        class CustomizeModel(tl.models.Model):
+        class CustomizeModel(tl.layers.Module):
 
             def __init__(self):
                 super(CustomizeModel, self).__init__()
@@ -204,7 +204,7 @@ def forward(self, x):
 
         model = CustomizeModel()
         print(model.lambdalayer)
-        model.train()
+        model.set_train()
 
         noise, mean, std, out = model(self.data_x)
         self.assertTrue(np.array_equal(out.numpy(), customize_func(noise, mean, std).numpy()))
diff --git a/tests/layers/test_layers_merge.py b/tests/layers/test_layers_merge.py
index 75e711054..0aeb763aa 100644
--- a/tests/layers/test_layers_merge.py
+++ b/tests/layers/test_layers_merge.py
@@ -4,13 +4,12 @@
 import os
 import unittest
 
-import numpy as np
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import numpy as np
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Merge_Test(CustomTestCase):
@@ -25,12 +24,12 @@ def tearDownClass(cls):
 
     def test_concat(self):
 
-        class CustomModel(tl.models.Model):
+        class CustomModel(tl.layers.Module):
 
             def __init__(self):
                 super(CustomModel, self).__init__()
-                self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu1_1')
-                self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu2_1')
+                self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tl.ReLU, name='relu1_1')
+                self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tl.ReLU, name='relu2_1')
                 self.concat = tl.layers.Concat(concat_dim=1, name='concat_layer')
 
             def forward(self, inputs):
@@ -40,8 +39,8 @@ def forward(self, inputs):
                 return outputs
 
         model = CustomModel()
-        model.train()
-        inputs = tf.convert_to_tensor(np.random.random([4, 20]).astype(np.float32))
+        model.set_train()
+        inputs = tl.ops.convert_to_tensor(np.random.random([4, 20]).astype(np.float32))
         outputs = model(inputs)
         print(model)
 
@@ -49,13 +48,13 @@ def forward(self, inputs):
 
     def test_elementwise(self):
 
-        class CustomModel(tl.models.Model):
+        class CustomModel(tl.layers.Module):
 
             def __init__(self):
                 super(CustomModel, self).__init__()
-                self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu1_1')
-                self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu2_1')
-                self.element = tl.layers.Elementwise(combine_fn=tf.minimum, name='minimum', act=tf.identity)
+                self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tl.ReLU, name='relu1_1')
+                self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tl.ReLU, name='relu2_1')
+                self.element = tl.layers.Elementwise(combine_fn=tl.minimum, name='minimum', act=None)
 
             def forward(self, inputs):
                 d1 = self.dense1(inputs)
@@ -64,12 +63,12 @@ def forward(self, inputs):
                 return outputs, d1, d2
 
         model = CustomModel()
-        model.train()
-        inputs = tf.convert_to_tensor(np.random.random([4, 20]).astype(np.float32))
+        model.set_train()
+        inputs = tl.ops.convert_to_tensor(np.random.random([4, 20]).astype(np.float32))
         outputs, d1, d2 = model(inputs)
         print(model)
 
-        min = tf.minimum(d1, d2)
+        min = tl.ops.minimum(d1, d2)
         self.assertEqual(outputs.get_shape().as_list(), [4, 10])
         self.assertTrue(np.array_equal(min.numpy(), outputs.numpy()))
diff --git a/tests/layers/test_layers_noise.py b/tests/layers/test_layers_noise.py
index 056410ba1..2a8b4fe1e 100644
--- a/tests/layers/test_layers_noise.py
+++ b/tests/layers/test_layers_noise.py
@@ -4,13 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
 import tensorlayer as tl
 from tensorlayer.layers import *
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Convolution_1D_Test(CustomTestCase):
@@ -23,12 +22,10 @@ def setUpClass(cls):
         cls.inputs_shape = [cls.batch_size, 200]
 
         cls.input_layer = Input(cls.inputs_shape, name='input_layer')
-        cls.dense = tl.layers.Dense(n_units=100, act=tf.nn.relu, in_channels=200)(cls.input_layer)
+        cls.dense = tl.layers.Dense(n_units=100, act=tl.ReLU, in_channels=200)(cls.input_layer)
 
         cls.noiselayer = tl.layers.GaussianNoise(name='gaussian')(cls.dense)
 
-        print("Testing GaussianNoise: \n", cls.noiselayer._info[0].layer)
-
     @classmethod
     def tearDownClass(cls):
         pass
@@ -39,6 +36,6 @@ def test_layer_n1(self):
 
 
 if __name__ == '__main__':
-    tl.logging.set_verbosity(tl.logging.DEBUG)
+    # tl.logging.set_verbosity(tl.logging.DEBUG)
 
     unittest.main()
diff --git a/tests/layers/test_layers_normalization.py b/tests/layers/test_layers_normalization.py
index b6bb30ad2..51cd41387 100644
--- a/tests/layers/test_layers_normalization.py
+++ b/tests/layers/test_layers_normalization.py
@@ -4,15 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
 import tensorlayer as tl
 from tensorlayer.layers import *
-from tensorlayer.models import Model
 from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
-
 
 class Laye_BatchNorm_Test(CustomTestCase):
 
@@ -25,31 +22,28 @@ def setUpClass(cls):
         x_3_input_shape = [None, 100, 100, 100, 3]
         batchsize = 2
 
-        cls.x0 = tf.random.normal([batchsize] + x_0_input_shape[1:])
-        cls.x1 = tf.random.normal([batchsize] + x_1_input_shape[1:])
-        cls.x2 = tf.random.normal([batchsize] + x_2_input_shape[1:])
-        cls.x3 = tf.random.normal([batchsize] + x_3_input_shape[1:])
+        cls.x0 = tl.ops.truncated_normal(shape=[batchsize] + x_0_input_shape[1:])
+        cls.x1 = tl.ops.truncated_normal([batchsize] + x_1_input_shape[1:])
+        cls.x2 = tl.ops.truncated_normal([batchsize] + x_2_input_shape[1:])
+        cls.x3 = tl.ops.truncated_normal([batchsize] + x_3_input_shape[1:])
 
         ## Base
         ni_1 = Input(x_1_input_shape, name='test_ni1')
         nn_1 = Conv1d(n_filter=32, filter_size=5, stride=2, name='test_conv1d')(ni_1)
         n1_b = BatchNorm(name='test_conv')(nn_1)
         cls.n1_b = n1_b
-        cls.base_1d = Model(inputs=ni_1, outputs=n1_b, name='test_base_1d')
 
         ni_2 = Input(x_2_input_shape, name='test_ni2')
         nn_2 = Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), name='test_conv2d')(ni_2)
         n2_b = BatchNorm(name='test_bn2d')(nn_2)
         cls.n2_b = n2_b
-        cls.base_2d = Model(inputs=ni_2, outputs=n2_b, name='test_base_2d')
 
         ni_3 = Input(x_3_input_shape, name='test_ni2')
         nn_3 = Conv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), name='test_conv3d')(ni_3)
         n3_b = BatchNorm(name='test_bn3d')(nn_3)
         cls.n3_b = n3_b
-        cls.base_3d = Model(inputs=ni_3, outputs=n3_b, name='test_base_3d')
 
-        class bn_0d_model(Model):
+        class bn_0d_model(tl.layers.Module):
 
             def __init__(self):
                 super(bn_0d_model, self).__init__()
@@ -61,7 +55,7 @@ def forward(self, x):
                 return x
 
         dynamic_base = bn_0d_model()
-        cls.n0_b = dynamic_base(cls.x0, is_train=True)
+        cls.n0_b = dynamic_base(cls.x0)
 
         ## 0D ========================================================================
 
@@ -72,9 +66,7 @@ def forward(self, x):
 
         cls.n0 = n0
 
-        cls.static_0d = Model(inputs=nin_0, outputs=n0)
-
-        class bn_0d_model(Model):
+        class bn_0d_model(tl.layers.Module):
 
             def __init__(self):
                 super(bn_0d_model, self).__init__(name='test_bn_0d_model')
@@ -87,10 +79,6 @@ def forward(self, x):
 
         cls.dynamic_0d = bn_0d_model()
 
-        print("Printing BatchNorm0d")
-        print(cls.static_0d)
-        print(cls.dynamic_0d)
-
         ## 1D ========================================================================
 
         nin_1 = Input(x_1_input_shape, name='test_in1')
@@ -100,9 +88,7 @@ def forward(self, x):
 
         cls.n1 = n1
 
-        cls.static_1d = Model(inputs=nin_1, outputs=n1)
-
-        class bn_1d_model(Model):
+        class bn_1d_model(tl.layers.Module):
 
             def __init__(self):
                 super(bn_1d_model, self).__init__(name='test_bn_1d_model')
@@ -115,10 +101,6 @@ def forward(self, x):
 
         cls.dynamic_1d = bn_1d_model()
 
-        print("Printing BatchNorm1d")
-        print(cls.static_1d)
-        print(cls.dynamic_1d)
-
         ## 2D ========================================================================
 
         nin_2 = Input(x_2_input_shape, name='test_in2')
@@ -128,9 +110,7 @@ def forward(self, x):
 
         cls.n2 = n2
 
-        cls.static_2d = Model(inputs=nin_2, outputs=n2)
-
-        class bn_2d_model(Model):
+        class bn_2d_model(tl.layers.Module):
 
             def __init__(self):
                 super(bn_2d_model, self).__init__(name='test_bn_2d_model')
@@ -143,22 +123,16 @@ def forward(self, x):
 
         cls.dynamic_2d = bn_2d_model()
 
-        print("Printing BatchNorm1d")
-        print(cls.static_2d)
-        print(cls.dynamic_2d)
-
         ## 3D ========================================================================
 
         nin_3 = Input(x_3_input_shape, name='test_in3')
 
         n3 = Conv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), name='test_conv3d')(nin_3)
-        n3 = BatchNorm3d(name='test_bn3d', act=tf.nn.relu)(n3)
+        n3 = BatchNorm3d(name='test_bn3d', act=tl.ReLU)(n3)
 
         cls.n3 = n3
 
-        cls.static_3d = Model(inputs=nin_3, outputs=n3)
-
-        class bn_3d_model(Model):
+        class bn_3d_model(tl.layers.Module):
 
             def __init__(self):
                 super(bn_3d_model, self).__init__(name='test_bn_3d_model')
@@ -173,9 +147,6 @@ def forward(self, x):
 
         cls.dynamic_3d = bn_3d_model()
 
-        print("Printing BatchNorm1d")
-        print(cls.static_3d)
-        print(cls.dynamic_3d)
 
     @classmethod
     def tearDownClass(cls):
@@ -184,37 +155,25 @@ def tearDownClass(cls):
 
     def test_BatchNorm(self):
         self.assertEqual(self.n1_b.shape[1:], (50, 32))
-        out = self.base_1d(self.x1, is_train=True)
 
         self.assertEqual(self.n2_b.shape[1:], (50, 50, 32))
-        out = self.base_2d(self.x2, is_train=True)
 
         self.assertEqual(self.n3_b.shape[1:], (50, 50, 50, 32))
-        out = self.base_3d(self.x3, is_train=True)
 
         self.assertEqual(self.n0_b.shape[1:], (32))
         print("test_BatchNorm OK")
 
     def test_BatchNorm0d(self):
         self.assertEqual(self.n0.shape[1:], (32))
-        out = self.static_0d(self.x0, is_train=True)
-        out = self.dynamic_0d(self.x0, is_train=True)
 
     def test_BatchNorm1d(self):
         self.assertEqual(self.n1.shape[1:], (50, 32))
-        out = self.static_1d(self.x1, is_train=True)
-        out = self.dynamic_1d(self.x1, is_train=True)
 
     def test_BatchNorm2d(self):
         self.assertEqual(self.n2.shape[1:], (50, 50, 32))
-        out = self.static_2d(self.x2, is_train=True)
-        out = self.dynamic_2d(self.x2, is_train=True)
-        out = self.dynamic_2d(self.x2, is_train=False)
 
     def test_BatchNorm3d(self):
         self.assertEqual(self.n3.shape[1:], (50, 50, 50, 32))
-        out = self.static_3d(self.x3, is_train=True)
-        out = self.dynamic_3d(self.x3, is_train=True)
 
     def test_dataformat(self):
         bn1d = BatchNorm1d(data_format='channels_first', num_features=32)
@@ -242,26 +201,6 @@ def test_exception(self):
             self.assertIsInstance(e, ValueError)
             print(e)
 
-    def test_input_shape(self):
-        try:
-            bn = BatchNorm1d(num_features=32)
-            out = bn(self.x2)
-        except Exception as e:
-            self.assertIsInstance(e, ValueError)
-            print(e)
-        try:
-            bn = BatchNorm2d(num_features=32)
-            out = bn(self.x3)
-        except Exception as e:
-            self.assertIsInstance(e, ValueError)
-            print(e)
-        try:
-            bn = BatchNorm3d(num_features=32)
-            out = bn(self.x1)
-        except Exception as e:
-            self.assertIsInstance(e, ValueError)
-            print(e)
-
 
 if __name__ == '__main__':
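The surviving assertions still cover the explicit-channel path. A small usage sketch, assuming the BatchNorm1d signature exercised in test_dataformat above and the tl.ops initializer used earlier in the hunk:

    import tensorlayer as tl
    from tensorlayer.layers import BatchNorm1d

    # channels_first: inputs are laid out as [batch, channels, length]
    x = tl.ops.truncated_normal(shape=[2, 32, 50])
    bn1d = BatchNorm1d(data_format='channels_first', num_features=32)
    out = bn1d(x)  # normalized over the 32 channels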
diff --git a/tests/layers/test_layers_padding.py b/tests/layers/test_layers_padding.py
index a92da5197..fa3e13bb1 100644
--- a/tests/layers/test_layers_padding.py
+++ b/tests/layers/test_layers_padding.py
@@ -4,12 +4,11 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Padding_Test(CustomTestCase):
@@ -23,9 +22,6 @@ def setUpClass(cls):
         n1 = tl.layers.ZeroPad1d(padding=1)(cls.input_layer1)
         n2 = tl.layers.ZeroPad1d(padding=(2, 3))(cls.input_layer1)
 
-        print(n1._info[0].layer)
-        print(n2._info[0].layer)
-
         cls.n1_shape = n1.get_shape().as_list()
         cls.n2_shape = n2.get_shape().as_list()
 
@@ -37,13 +33,7 @@ def setUpClass(cls):
         n4 = tl.layers.ZeroPad2d(padding=(2, 3))(cls.input_layer2)
         n5 = tl.layers.ZeroPad2d(padding=((3, 3), (4, 4)))(cls.input_layer2)
 
-        print(n0._info[0].layer)
-        print(n3._info[0].layer)
-        print(n4._info[0].layer)
-        print(n5._info[0].layer)
-
         cls.n0_shape = n0.get_shape().as_list()
-        print(cls.n0_shape)
         cls.n3_shape = n3.get_shape().as_list()
         cls.n4_shape = n4.get_shape().as_list()
         cls.n5_shape = n5.get_shape().as_list()
@@ -55,10 +45,6 @@ def setUpClass(cls):
         n7 = tl.layers.ZeroPad3d(padding=(2, 3, 4))(cls.input_layer3)
         n8 = tl.layers.ZeroPad3d(padding=((3, 3), (4, 4), (5, 5)))(cls.input_layer3)
 
-        print(n6._info[0].layer)
-        print(n7._info[0].layer)
-        print(n8._info[0].layer)
-
         cls.n6_shape = n6.get_shape().as_list()
         cls.n7_shape = n7.get_shape().as_list()
         cls.n8_shape = n8.get_shape().as_list()
diff --git a/tests/layers/test_layers_pooling.py b/tests/layers/test_layers_pooling.py
index 5ab3e3e98..39582aa28 100644
--- a/tests/layers/test_layers_pooling.py
+++ b/tests/layers/test_layers_pooling.py
@@ -4,13 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
 import tensorlayer as tl
 from tensorlayer.layers import *
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Pooling_Test(CustomTestCase):
@@ -40,16 +39,6 @@ def setUpClass(cls):
         cls.n16_shape = n16.get_shape().as_list()
         cls.n17_shape = n17.get_shape().as_list()
 
-        print("Printing Pool1d")
-        print(nin_1._info[0].layer)
-        print(n1._info[0].layer)
-        print(n2._info[0].layer)
-        print(n3._info[0].layer)
-        print(n4._info[0].layer)
-        print(n5._info[0].layer)
-        print(n16._info[0].layer)
-        print(n17._info[0].layer)
-
         ## 2D ========================================================================
 
         x_2_input_shape = [None, 100, 100, 3]
@@ -61,7 +50,7 @@ def setUpClass(cls):
         n9 = tl.layers.GlobalMaxPool2d(name='test_maxpool2d')(n6)
         n10 = tl.layers.GlobalMeanPool2d(name='test_meanpool2d')(n6)
         n15 = tl.layers.PoolLayer(name='test_pool2d')(n6)
-        n18 = tl.layers.CornerPool2d('TopLeft', name='test_cornerpool2d')(n6)
+        # n18 = tl.layers.CornerPool2d('TopLeft', name='test_cornerpool2d')(n6)
 
         cls.n6_shape = n6.get_shape().as_list()
         cls.n7_shape = n7.get_shape().as_list()
@@ -69,17 +58,8 @@ def setUpClass(cls):
         cls.n9_shape = n9.get_shape().as_list()
         cls.n10_shape = n10.get_shape().as_list()
         cls.n15_shape = n15.get_shape().as_list()
-        cls.n18_shape = n18.get_shape().as_list()
-
-        print("Printing Pool2d")
-        print(nin_2._info[0].layer)
-        print(n6._info[0].layer)
-        print(n7._info[0].layer)
-        print(n8._info[0].layer)
-        print(n9._info[0].layer)
-        print(n10._info[0].layer)
-        print(n15._info[0].layer)
-        print(n18._info[0].layer)
+        # cls.n18_shape = n18.get_shape().as_list()
+
 
         ## 3D ========================================================================
 
@@ -98,13 +78,6 @@ def setUpClass(cls):
         cls.n13_shape = n13.get_shape().as_list()
         cls.n14_shape = n14.get_shape().as_list()
 
-        print("Printing Pool3d")
-        print(nin_3._info[0].layer)
-        print(n11._info[0].layer)
-        print(n12._info[0].layer)
-        print(n13._info[0].layer)
-        print(n14._info[0].layer)
-
     @classmethod
     def tearDownClass(cls):
         pass
@@ -159,10 +132,10 @@ def test_n16_shape(self):
         self.assertEqual(self.n16_shape[1:4], [46, 32])
 
     def test_n17_shape(self):
-        self.assertEqual(self.n17_shape[1:4], [48, 32])
+        self.assertEqual(self.n17_shape[1:4], [46, 32])
 
-    def test_n18_shape(self):
-        self.assertEqual(self.n18_shape[1:], [50, 50, 32])
+    # def test_n18_shape(self):
+    #     self.assertEqual(self.n18_shape[1:], [50, 50, 32])
 
 
 if __name__ == '__main__':
diff --git a/tests/layers/test_layers_recurrent.py b/tests/layers/test_layers_recurrent.py
index 6f9eff3ea..011327e71 100644
--- a/tests/layers/test_layers_recurrent.py
+++ b/tests/layers/test_layers_recurrent.py
@@ -4,13 +4,12 @@
 import os
 import unittest
 
-import numpy as np
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import numpy as np
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_RNN_Test(CustomTestCase):
diff --git a/tests/layers/test_layers_resampling.py b/tests/layers/test_layers_resampling.py
index 643303558..4f0bbc903 100644
--- a/tests/layers/test_layers_resampling.py
+++ b/tests/layers/test_layers_resampling.py
@@ -1,18 +1,17 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-import os
 import sys
+sys.path.append("/home/wurundi/workspace/tensorlayer2")
+
+import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
 import tensorlayer as tl
 from tensorlayer.layers import *
-from tests.utils import CustomTestCase
 
-sys.path.append("/home/wurundi/workspace/tensorlayer2")
-
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Pooling_Test(CustomTestCase):
@@ -43,14 +42,6 @@ def setUpClass(cls):
         cls.n9_shape = n9.get_shape().as_list()
         cls.n10_shape = n10.get_shape().as_list()
 
-        print("Printing UpSampling2d")
-        print(nin_2._info[0].layer)
-        print(n6._info[0].layer)
-        print(n7._info[0].layer)
-        print(n8._info[0].layer)
-        print(n9._info[0].layer)
-        print(n10._info[0].layer)
-
     @classmethod
     def tearDownClass(cls):
         pass
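The next hunks replace NumPy arrays with initialized Input tensors as test data. A sketch of that idiom, assuming tl.layers.Input accepts an init argument as the rewritten tests below show:

    import tensorlayer as tl

    # Concrete test data straight from the layers API: an Input whose values
    # are drawn from a normal distribution, instead of np.random + convert.
    data = tl.layers.Input(shape=(8, 4, 3), init=tl.initializers.random_normal())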
diff --git a/tests/layers/test_layers_scale.py b/tests/layers/test_layers_scale.py
index fdf5228ed..c87e560af 100644
--- a/tests/layers/test_layers_scale.py
+++ b/tests/layers/test_layers_scale.py
@@ -4,13 +4,11 @@
 import os
 import unittest
 
-import numpy as np
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Scale_Test(CustomTestCase):
@@ -24,16 +22,22 @@ def tearDownClass(cls):
         pass
 
     def test_scale(self):
-        inputs = tl.layers.Input([8, 3])
-        dense = tl.layers.Dense(n_units=10)(inputs)
-        scalelayer = tl.layers.Scale(init_scale=0.5)
-        outputs = scalelayer(dense)
-        model = tl.models.Model(inputs=inputs, outputs=[dense, outputs])
-
-        print(scalelayer)
-        data = np.random.random(size=[8, 3]).astype(np.float32)
-        dout, fout = model(data, is_train=True)
+        class model(tl.layers.Module):
+
+            def __init__(self):
+                super(model, self).__init__()
+                self.dense = tl.layers.Dense(n_units=10)
+                self.scalelayer = tl.layers.Scale(init_scale=0.5)
+
+            def forward(self, inputs):
+                output1 = self.dense(inputs)
+                output2 = self.scalelayer(output1)
+                return output1, output2
+
+        input = tl.layers.Input((8, 3), init=tl.initializers.random_normal())
+        net = model()
+        net.set_train()
+        dout, fout = net(input)
 
         for i in range(len(dout)):
             for j in range(len(dout[i])):
diff --git a/tests/layers/test_layers_shape.py b/tests/layers/test_layers_shape.py
index 2ece6b0b7..139a3f8ed 100644
--- a/tests/layers/test_layers_shape.py
+++ b/tests/layers/test_layers_shape.py
@@ -3,22 +3,21 @@
 import os
 import unittest
-
 import numpy as np
-import tensorflow as tf
+
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Shape_Test(CustomTestCase):
 
     @classmethod
     def setUpClass(cls):
-        cls.data = np.random.random(size=[8, 4, 3]).astype(np.float32)
-        cls.imgdata = np.random.random(size=[2, 16, 16, 8]).astype(np.float32)
+        cls.data = tl.layers.Input(shape=(8, 4, 3), init=tl.initializers.random_normal())
+        cls.imgdata = tl.layers.Input(shape=(2, 16, 16, 8), init=tl.initializers.random_normal())
 
     @classmethod
     def tearDownClass(cls):
@@ -26,7 +25,7 @@ def tearDownClass(cls):
 
     def test_flatten(self):
 
-        class CustomizeModel(tl.models.Model):
+        class CustomizeModel(tl.layers.Module):
 
             def __init__(self):
                 super(CustomizeModel, self).__init__()
@@ -37,13 +36,13 @@ def forward(self, x):
 
         model = CustomizeModel()
         print(model.flatten)
-        model.train()
+        model.set_train()
 
         out = model(self.data)
         self.assertEqual(out.get_shape().as_list(), [8, 12])
 
     def test_reshape(self):
 
-        class CustomizeModel(tl.models.Model):
+        class CustomizeModel(tl.layers.Module):
 
             def __init__(self):
                 super(CustomizeModel, self).__init__()
@@ -58,7 +57,7 @@ def forward(self, x):
         print(model.reshape1)
         print(model.reshape2)
         print(model.reshape3)
-        model.train()
+        model.set_train()
 
         out1, out2, out3 = model(self.data)
         self.assertEqual(out1.get_shape().as_list(), [8, 12])
         self.assertEqual(out2.get_shape().as_list(), [8, 12])
@@ -66,7 +65,7 @@ def forward(self, x):
 
     def test_transpose(self):
 
-        class CustomizeModel(tl.models.Model):
+        class CustomizeModel(tl.layers.Module):
 
             def __init__(self):
                 super(CustomizeModel, self).__init__()
@@ -78,15 +77,16 @@ def __init__(self):
             def forward(self, x):
                 return self.transpose1(x), self.transpose2(x), self.transpose3(x), self.transpose4(x)
 
-        real = np.random.random([8, 4, 3]).astype(np.float32)
-        comp = np.random.random([8, 4, 3]).astype(np.float32)
-        complex_data = real + 1j * comp
+        real = tl.layers.Input(shape=(8, 4, 3), init=tl.initializers.random_normal())
+        comp = tl.layers.Input(shape=(8, 4, 3), init=tl.initializers.random_normal())
+        import tensorflow as tf
+        complex_data = tf.dtypes.complex(real, comp)
         model = CustomizeModel()
         print(model.transpose1)
         print(model.transpose2)
         print(model.transpose3)
         print(model.transpose4)
-        model.train()
+        model.set_train()
 
         out1, out2, out3, out4 = model(self.data)
         self.assertEqual(out1.get_shape().as_list(), [3, 4, 8])
         self.assertEqual(out2.get_shape().as_list(), [3, 4, 8])
@@ -103,7 +103,7 @@ def forward(self, x):
 
     def test_shuffle(self):
 
-        class CustomizeModel(tl.models.Model):
+        class CustomizeModel(tl.layers.Module):
 
             def __init__(self, x):
                 super(CustomizeModel, self).__init__()
@@ -114,18 +114,9 @@ def forward(self, x):
 
         model = CustomizeModel(2)
         print(model.shuffle)
-        model.train()
+        model.set_train()
 
         out = model(self.imgdata)
         self.assertEqual(out.get_shape().as_list(), [2, 16, 16, 8])
-        try:
-            model_fail = CustomizeModel(3)
-            print(model_fail.shuffle)
-            model_fail.train()
-            out = model_fail(self.imgdata)
-            self.assertEqual(out.get_shape().as_list(), [2, 16, 16, 8])
-        except Exception as e:
-            self.assertIsInstance(e, ValueError)
-            print(e)
 
 
 if __name__ == '__main__':
diff --git a/tests/layers/test_layers_stack.py b/tests/layers/test_layers_stack.py
index 4005c61e8..6a703cb82 100644
--- a/tests/layers/test_layers_stack.py
+++ b/tests/layers/test_layers_stack.py
@@ -3,14 +3,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
 import tensorlayer as tl
 from tensorlayer.layers import *
-from tensorlayer.models import *
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Stack_Test(CustomTestCase):
@@ -22,16 +20,29 @@ def setUpClass(cls):
         cls.inputs_shape = [cls.batch_size, 10]
 
         cls.ni = Input(cls.inputs_shape, name='input_layer')
 
+        class model(tl.layers.Module):
+
+            def __init__(self):
+                super(model, self).__init__()
+                self.a = Dense(n_units=5)
+                self.b = Dense(n_units=5)
+                self.stack = Stack(axis=1)
+
+            def forward(self, inputs):
+                output1 = self.a(inputs)
+                output2 = self.b(inputs)
+                output = self.stack([output1, output2])
+                return output
+
         a = Dense(n_units=5)(cls.ni)
         b = Dense(n_units=5)(cls.ni)
         cls.layer1 = Stack(axis=1)
         cls.n1 = cls.layer1([a, b])
-        cls.M = Model(inputs=cls.ni, outputs=cls.n1)
 
-        cls.inputs = tf.random.uniform(cls.inputs_shape)
-        cls.n2 = cls.M(cls.inputs, is_train=True)
+        net = model()
+        net.set_train()
+        cls.inputs = Input(cls.inputs_shape)
+        cls.n2 = net(cls.inputs)
 
-        print(cls.layer1)
 
     @classmethod
     def tearDownClass(cls):
@@ -54,12 +65,25 @@ def setUpClass(cls):
         cls.ni = Input(cls.inputs_shape, name='input_layer')
 
         a = Dense(n_units=5)(cls.ni)
-        cls.layer1 = UnStack(axis=1)  # unstack in channel axis
+        cls.layer1 = UnStack(axis=1)
         cls.n1 = cls.layer1(a)
-        cls.M = Model(inputs=cls.ni, outputs=cls.n1)
 
-        cls.inputs = tf.random.uniform(cls.inputs_shape)
-        cls.n2 = cls.M(cls.inputs, is_train=True)
+        class model(tl.layers.Module):
+
+            def __init__(self):
+                super(model, self).__init__()
+                self.a = Dense(n_units=5)
+                self.unstack = UnStack(axis=1)
+
+            def forward(self, inputs):
+                output1 = self.a(inputs)
+                output = self.unstack(output1)
+                return output
+
+
+        cls.inputs = Input(cls.inputs_shape)
+        net = model()
+        net.set_train()
+        cls.n2 = net(cls.inputs)
 
         print(cls.layer1)
diff --git a/tests/models/test_auto_naming.py b/tests/models/test_auto_naming.py
index 65337a8c9..fb8f03720 100644
--- a/tests/models/test_auto_naming.py
+++ b/tests/models/test_auto_naming.py
@@ -3,15 +3,15 @@
 import os
 import unittest
 
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
 import numpy as np
 import tensorflow as tf
-
 import tensorlayer as tl
 from tensorlayer.layers import *
 from tensorlayer.models import *
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 def basic_static_model(name=None, conv1_name="conv1", conv2_name="conv2"):
diff --git a/tests/models/test_keras_save.py b/tests/models/test_keras_save.py
index caadd6574..2d40b31ef 100644
--- a/tests/models/test_keras_save.py
+++ b/tests/models/test_keras_save.py
@@ -1,8 +1,8 @@
-import tensorflow as tf
-from tensorflow.python.keras import Model
 from tensorflow.python.keras.applications import VGG16
-from tensorflow.python.keras.layers import Conv2D, Dense
+from tensorflow.python.keras.layers import Dense, Conv2D
+from tensorflow.python.keras import Model
 from tensorflow.python.training import saver
+import tensorflow as tf
 
 # get the whole model
 # vgg = VGG16(weights=None)
diff --git a/tests/models/test_model_core.py b/tests/models/test_model_core.py
index 0a98e154d..3db470f9d 100644
--- a/tests/models/test_model_core.py
+++ b/tests/models/test_model_core.py
@@ -3,15 +3,15 @@
 import os
 import unittest
 
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
 import numpy as np
 import tensorflow as tf
-
 import tensorlayer as tl
 from tensorlayer.layers import *
 from tensorlayer.models import *
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 def basic_static_model():
diff --git a/tests/models/test_model_save.py b/tests/models/test_model_save.py
index 001e9a3df..ba224ee25 100644
--- a/tests/models/test_model_save.py
+++ b/tests/models/test_model_save.py
@@ -3,15 +3,15 @@
 import os
 import unittest
 
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
 import numpy as np
 import tensorflow as tf
-
 import tensorlayer as tl
 from tensorlayer.layers import *
 from tensorlayer.models import *
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 def basic_static_model(include_top=True):
@@ -80,6 +80,7 @@ def setUpClass(cls):
         print([l.name for l in cls.dynamic_basic.all_layers])
         print([l.name for l in cls.dynamic_basic_skip.all_layers])
 
+        pass
 
     @classmethod
     def tearDownClass(cls):
diff --git a/tests/models/test_model_save_graph.py b/tests/models/test_model_save_graph.py
index 1e9b898a1..3e527159d 100644
--- a/tests/models/test_model_save_graph.py
+++ b/tests/models/test_model_save_graph.py
@@ -4,15 +4,15 @@
 import os
 import unittest
 
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
 import numpy as np
 import tensorflow as tf
-
 import tensorlayer as tl
 from tensorlayer.layers import *
 from tensorlayer.models import *
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 def RemoveDateInConfig(config):
diff --git a/tests/models/test_seq2seq_model.py b/tests/models/test_seq2seq_model.py
index 52939e764..d77aa47ba 100644
--- a/tests/models/test_seq2seq_model.py
+++ b/tests/models/test_seq2seq_model.py
@@ -4,17 +4,16 @@
 import os
 import unittest
 
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
 import numpy as np
 import tensorflow as tf
-from sklearn.utils import shuffle
-from tqdm import tqdm
-
 import tensorlayer as tl
-from tensorlayer.cost import cross_entropy_seq
+from tqdm import tqdm
+from sklearn.utils import shuffle
 from tensorlayer.models.seq2seq import Seq2seq
 from tests.utils import CustomTestCase
-
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tensorlayer.cost import cross_entropy_seq
 
 
 class Model_SEQ2SEQ_Test(CustomTestCase):
diff --git a/tests/models/test_seq2seq_with_attention.py b/tests/models/test_seq2seq_with_attention.py
index 9cfc07cec..d7dbeae34 100644
--- a/tests/models/test_seq2seq_with_attention.py
+++ b/tests/models/test_seq2seq_with_attention.py
@@ -4,17 +4,16 @@
 import os
 import unittest
 
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
 import numpy as np
 import tensorflow as tf
-from sklearn.utils import shuffle
-from tqdm import tqdm
-
 import tensorlayer as tl
-from tensorlayer.cost import cross_entropy_seq
+from tqdm import tqdm
+from sklearn.utils import shuffle
 from tensorlayer.models.seq2seq_with_attention import Seq2seqLuongAttention
 from tests.utils import CustomTestCase
-
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tensorlayer.cost import cross_entropy_seq
 
 
 class Model_SEQ2SEQ_WITH_ATTENTION_Test(CustomTestCase):
diff --git a/tests/pending/test_array_ops.py b/tests/pending/test_array_ops.py
index 7813e286e..56b80d485 100644
--- a/tests/pending/test_array_ops.py
+++ b/tests/pending/test_array_ops.py
@@ -4,13 +4,14 @@
 import os
 import unittest
 
-import numpy as np
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+import numpy as np
+
+from tests.utils import CustomTestCase
 
 
 class Array_Op_Alphas_Test(CustomTestCase):
diff --git a/tests/pending/test_decorators.py b/tests/pending/test_decorators.py
index fbe91b2ba..cc8878543 100644
--- a/tests/pending/test_decorators.py
+++ b/tests/pending/test_decorators.py
@@ -4,13 +4,14 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
+
 from tensorlayer.decorators import private_method
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Pooling_Test(CustomTestCase):
diff --git a/tests/pending/test_documentation.py b/tests/pending/test_documentation.py
index 332a5cb03..211142e8d 100755
--- a/tests/pending/test_documentation.py
+++ b/tests/pending/test_documentation.py
@@ -4,10 +4,10 @@
 import os
 import unittest
 
-from sphinx.application import Sphinx
-
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+from sphinx.application import Sphinx
+
 
 class DocTest(unittest.TestCase):
 
     source_dir = u'docs/'
diff --git a/tests/pending/test_layers_basic.py b/tests/pending/test_layers_basic.py
index 209663bd2..2771f961a 100644
--- a/tests/pending/test_layers_basic.py
+++ b/tests/pending/test_layers_basic.py
@@ -4,12 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Basic_Test(CustomTestCase):
diff --git a/tests/pending/test_layers_flow_control.py b/tests/pending/test_layers_flow_control.py
index b82c460b6..d86eb217a 100644
--- a/tests/pending/test_layers_flow_control.py
+++ b/tests/pending/test_layers_flow_control.py
@@ -4,12 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Flow_Control_Test(CustomTestCase):
diff --git a/tests/pending/test_layers_importer.py b/tests/pending/test_layers_importer.py
index c5a2f0d3c..1c1321acb 100644
--- a/tests/pending/test_layers_importer.py
+++ b/tests/pending/test_layers_importer.py
@@ -4,17 +4,20 @@
 import os
 import unittest
 
-import tensorflow as tf
-from tensorflow.contrib.slim.python.slim.nets.inception_v3 import (inception_v3, inception_v3_arg_scope)
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
-import tensorlayer as tl
-from tests.utils import CustomTestCase
+import tensorflow as tf
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3
+from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_arg_scope
 
 slim = tf.contrib.slim
 keras = tf.keras
 
+import tensorlayer as tl
+
+from tests.utils import CustomTestCase
+
 
 class Layer_Importer_Test(CustomTestCase):
diff --git a/tests/pending/test_layers_normalization.py b/tests/pending/test_layers_normalization.py
index e6fd8bd81..d0891abf1 100644
--- a/tests/pending/test_layers_normalization.py
+++ b/tests/pending/test_layers_normalization.py
@@ -4,12 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 def model(x, is_train=True, reuse=False):
diff --git a/tests/pending/test_layers_padding.py b/tests/pending/test_layers_padding.py
index 163838cb5..ab6f6b54d 100644
--- a/tests/pending/test_layers_padding.py
+++ b/tests/pending/test_layers_padding.py
@@ -4,12 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Padding_Test(CustomTestCase):
diff --git a/tests/pending/test_layers_spatial_transformer.py b/tests/pending/test_layers_spatial_transformer.py
index b585f6032..4c6d81b44 100644
--- a/tests/pending/test_layers_spatial_transformer.py
+++ b/tests/pending/test_layers_spatial_transformer.py
@@ -4,12 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 def model(x, is_train, reuse):
diff --git a/tests/pending/test_layers_stack.py b/tests/pending/test_layers_stack.py
index c223b0553..0745a834d 100644
--- a/tests/pending/test_layers_stack.py
+++ b/tests/pending/test_layers_stack.py
@@ -4,12 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Stack_Test(CustomTestCase):
diff --git a/tests/pending/test_layers_super_resolution.py b/tests/pending/test_layers_super_resolution.py
index f60986700..9b359cb99 100644
--- a/tests/pending/test_layers_super_resolution.py
+++ b/tests/pending/test_layers_super_resolution.py
@@ -4,12 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Super_Resolution_Test(CustomTestCase):
diff --git a/tests/pending/test_layers_time_distributed.py b/tests/pending/test_layers_time_distributed.py
index bb2f33fc0..a97c51117 100644
--- a/tests/pending/test_layers_time_distributed.py
+++ b/tests/pending/test_layers_time_distributed.py
@@ -4,12 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 def model(x, is_train=True, reuse=False, name_scope="env1"):
diff --git a/tests/pending/test_logging.py b/tests/pending/test_logging.py
index 59f171b21..fffdf7cc5 100644
--- a/tests/pending/test_logging.py
+++ b/tests/pending/test_logging.py
@@ -4,12 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class TL_Logger_Test(CustomTestCase):
diff --git a/tests/pending/test_logging_hyperdash.py b/tests/pending/test_logging_hyperdash.py
index 6616bd1c9..c39e66160 100644
--- a/tests/pending/test_logging_hyperdash.py
+++ b/tests/pending/test_logging_hyperdash.py
@@ -2,16 +2,18 @@
 # -*- coding: utf-8 -*-
 
 import os
-import time
 import unittest
 
+import time
+
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
-import tensorflow as tf
+import tensorflow as tf
 import tensorlayer as tl
+
 from tensorlayer.logging.contrib import hyperdash as hd
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class TL_Logger_Test(CustomTestCase):
diff --git a/tests/pending/test_mnist_simple.py b/tests/pending/test_mnist_simple.py
index 90fa18b36..ec14c390a 100644
--- a/tests/pending/test_mnist_simple.py
+++ b/tests/pending/test_mnist_simple.py
@@ -4,12 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Simple_MNIST_Test(CustomTestCase):
diff --git a/tests/pending/test_models.py b/tests/pending/test_models.py
index dd0e07cbd..ecaf036fc 100644
--- a/tests/pending/test_models.py
+++ b/tests/pending/test_models.py
@@ -4,12 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class VGG_Model_Test(CustomTestCase):
diff --git a/tests/pending/test_optimizer_amsgrad.py b/tests/pending/test_optimizer_amsgrad.py
index 919881c41..0ceb8b372 100644
--- a/tests/pending/test_optimizer_amsgrad.py
+++ b/tests/pending/test_optimizer_amsgrad.py
@@ -4,12 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Pooling_Test(CustomTestCase):
diff --git a/tests/pending/test_pydocstyle.py b/tests/pending/test_pydocstyle.py
index 5a7143d1d..b93bf74db 100755
--- a/tests/pending/test_pydocstyle.py
+++ b/tests/pending/test_pydocstyle.py
@@ -4,10 +4,12 @@
 import os
 import unittest
 
-from pydocstyle.checker import check, violations
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
 from tests.utils import list_all_py_files
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from pydocstyle.checker import check
+from pydocstyle.checker import violations
 
 registry = violations.ErrorRegistry
diff --git a/tests/pending/test_reuse_mlp.py b/tests/pending/test_reuse_mlp.py
index 5992b8bda..3ca435b38 100644
--- a/tests/pending/test_reuse_mlp.py
+++ b/tests/pending/test_reuse_mlp.py
@@ -4,12 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 # define the network
diff --git a/tests/pending/test_tf_layers.py b/tests/pending/test_tf_layers.py
index 3ba11820c..dc04a06ff 100644
--- a/tests/pending/test_tf_layers.py
+++ b/tests/pending/test_tf_layers.py
@@ -4,12 +4,12 @@
 import os
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Layer_Convolution_1D_Test(CustomTestCase):
diff --git a/tests/pending/test_timeout.py b/tests/pending/test_timeout.py
index 914c0bdf6..9b5dda621 100644
--- a/tests/pending/test_timeout.py
+++ b/tests/pending/test_timeout.py
@@ -3,15 +3,21 @@
 import os
 import time
+
 import unittest
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import (CustomTestCase, TimeoutContext, TimeoutError, WindowsError)
-from tests.utils.custom_networks import InceptionV4_Network
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import WindowsError
+from tests.utils import TimeoutError
+
+from tests.utils import TimeoutContext
+from tests.utils import CustomTestCase
+
+from tests.utils.custom_networks import InceptionV4_Network
 
 if os.getenv("TRAVIS", None) is not None:
     NETWORK_CREATION_TIMEOUT = 120  # Seconds before timeout
diff --git a/tests/pending/test_utils_predict.py b/tests/pending/test_utils_predict.py
index bea7eb99e..ec751e275 100644
--- a/tests/pending/test_utils_predict.py
+++ b/tests/pending/test_utils_predict.py
@@ -4,13 +4,14 @@
 import os
 import unittest
 
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
 import numpy as np
-import tensorflow as tf
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Util_Predict_Test(CustomTestCase):
diff --git a/tests/pending/test_yapf_format.py b/tests/pending/test_yapf_format.py
index 2dc790ea9..05ff6f699 100644
--- a/tests/pending/test_yapf_format.py
+++ b/tests/pending/test_yapf_format.py
@@ -4,9 +4,10 @@
 import sys
 import unittest
 
-from yapf.yapflib.yapf_api import FormatCode
+from tests.utils import list_all_py_files
+from tests.utils import CustomTestCase
 
-from tests.utils import CustomTestCase, list_all_py_files
+from yapf.yapflib.yapf_api import FormatCode
 
 
 def _read_utf_8_file(filename):
diff --git a/tests/performance_test/vgg/keras_test.py b/tests/performance_test/vgg/keras_test.py
index fdb0b89d6..4b77cbea1 100644
--- a/tests/performance_test/vgg/keras_test.py
+++ b/tests/performance_test/vgg/keras_test.py
@@ -1,14 +1,12 @@
-import os
 import time
-
+import os
 import psutil
-import tensorflow as tf
-
 import keras
-from exp_config import (BATCH_SIZE, LERANING_RATE, MONITOR_INTERVAL, NUM_ITERS, random_input_generator)
 from keras.applications.vgg16 import VGG16
 from keras.backend.tensorflow_backend import set_session
 from keras.utils import to_categorical
+import tensorflow as tf
+from exp_config import random_input_generator, MONITOR_INTERVAL, NUM_ITERS, BATCH_SIZE, LERANING_RATE
 
 config = tf.ConfigProto()
 config.gpu_options.allow_growth = True
diff --git a/tests/performance_test/vgg/pytorch_test.py b/tests/performance_test/vgg/pytorch_test.py
index aaf278d4f..a81aa0be3 100644
--- a/tests/performance_test/vgg/pytorch_test.py
+++ b/tests/performance_test/vgg/pytorch_test.py
@@ -1,14 +1,12 @@
-import os
-import time
-
-import numpy as np
-import psutil
 import torch
 import torch.nn.functional as F
 import torch.optim as optim
-
-from exp_config import (BATCH_SIZE, LERANING_RATE, MONITOR_INTERVAL, NUM_ITERS, random_input_generator)
 from torchvision.models import vgg16
+import time
+import os
+import psutil
+import numpy as np
+from exp_config import random_input_generator, MONITOR_INTERVAL, NUM_ITERS, BATCH_SIZE, LERANING_RATE
 
 # set gpu_id 0
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
diff --git a/tests/performance_test/vgg/tf2-autograph.py b/tests/performance_test/vgg/tf2-autograph.py
index 220196d34..90d2ccf0d 100644
--- a/tests/performance_test/vgg/tf2-autograph.py
+++ b/tests/performance_test/vgg/tf2-autograph.py
@@ -1,11 +1,9 @@
-import os
 import time
-
+import os
 import psutil
-import tensorflow as tf
 from tensorflow.python.keras.applications import VGG16
-
-from exp_config import (BATCH_SIZE, LERANING_RATE, MONITOR_INTERVAL, NUM_ITERS, random_input_generator)
+import tensorflow as tf
+from exp_config import random_input_generator, MONITOR_INTERVAL, NUM_ITERS, BATCH_SIZE, LERANING_RATE
 
 gpus = tf.config.experimental.list_physical_devices('GPU')
 if gpus:
diff --git a/tests/performance_test/vgg/tf2-eager.py b/tests/performance_test/vgg/tf2-eager.py
index 800d4421d..d4c78088f 100644
--- a/tests/performance_test/vgg/tf2-eager.py
+++ b/tests/performance_test/vgg/tf2-eager.py
@@ -1,11 +1,9 @@
-import os
 import time
-
+import os
 import psutil
-import tensorflow as tf
 from tensorflow.python.keras.applications import VGG16
-
-from exp_config import (BATCH_SIZE, LERANING_RATE, MONITOR_INTERVAL, NUM_ITERS, random_input_generator)
+import tensorflow as tf
+from exp_config import random_input_generator, MONITOR_INTERVAL, NUM_ITERS, BATCH_SIZE, LERANING_RATE
 
 gpus = tf.config.experimental.list_physical_devices('GPU')
 if gpus:
diff --git a/tests/performance_test/vgg/tl2-autograph.py b/tests/performance_test/vgg/tl2-autograph.py
index 1bfd6fb8c..63f553960 100644
--- a/tests/performance_test/vgg/tl2-autograph.py
+++ b/tests/performance_test/vgg/tl2-autograph.py
@@ -1,11 +1,9 @@
-import os
 import time
-
+import os
 import psutil
 import tensorflow as tf
-
 import tensorlayer as tl
-from exp_config import (BATCH_SIZE, LERANING_RATE, MONITOR_INTERVAL, NUM_ITERS, random_input_generator)
+from exp_config import random_input_generator, MONITOR_INTERVAL, NUM_ITERS, BATCH_SIZE, LERANING_RATE
 
 gpus = tf.config.experimental.list_physical_devices('GPU')
 if gpus:
diff --git a/tests/performance_test/vgg/tl2-eager.py b/tests/performance_test/vgg/tl2-eager.py
index 9f0699fd3..fd2ef4085 100644
--- a/tests/performance_test/vgg/tl2-eager.py
+++ b/tests/performance_test/vgg/tl2-eager.py
@@ -1,11 +1,9 @@
-import os
 import time
-
+import os
 import psutil
 import tensorflow as tf
-
 import tensorlayer as tl
-from exp_config import (BATCH_SIZE, LERANING_RATE, MONITOR_INTERVAL, NUM_ITERS, random_input_generator)
+from exp_config import random_input_generator, MONITOR_INTERVAL, NUM_ITERS, BATCH_SIZE, LERANING_RATE
 
 gpus = tf.config.experimental.list_physical_devices('GPU')
 if gpus:
diff --git a/tests/performance_test/vgg/tl2-static-autograph.py b/tests/performance_test/vgg/tl2-static-autograph.py
index 4c42a0616..0af20adb8 100644
--- a/tests/performance_test/vgg/tl2-static-autograph.py
+++ b/tests/performance_test/vgg/tl2-static-autograph.py
@@ -1,11 +1,9 @@
-import os
 import time
-
+import os
 import psutil
 import tensorflow as tf
-
 import tensorlayer as tl
-from exp_config import (BATCH_SIZE, LERANING_RATE, MONITOR_INTERVAL, NUM_ITERS, random_input_generator)
+from exp_config import random_input_generator, MONITOR_INTERVAL, NUM_ITERS, BATCH_SIZE, LERANING_RATE
 
 gpus = tf.config.experimental.list_physical_devices('GPU')
 if gpus:
diff --git a/tests/performance_test/vgg/tl2-static-eager.py b/tests/performance_test/vgg/tl2-static-eager.py
index 003ed5f41..b6d5287ba 100644
--- a/tests/performance_test/vgg/tl2-static-eager.py
+++ b/tests/performance_test/vgg/tl2-static-eager.py
@@ -1,11 +1,9 @@
-import os
 import time
-
+import os
 import psutil
 import tensorflow as tf
-
 import tensorlayer as tl
-from exp_config import (BATCH_SIZE, LERANING_RATE, MONITOR_INTERVAL, NUM_ITERS, random_input_generator)
+from exp_config import random_input_generator, MONITOR_INTERVAL, NUM_ITERS, BATCH_SIZE, LERANING_RATE
 
 gpus = tf.config.experimental.list_physical_devices('GPU')
 if gpus:
diff --git a/tests/test_activations.py b/tests/test_activations.py
index e168bd91e..39097a63b 100644
--- a/tests/test_activations.py
+++ b/tests/test_activations.py
@@ -4,12 +4,12 @@
 import os
 import unittest
 
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
 import tensorflow as tf
-import numpy as np
 
 import tensorlayer as tl
-from tests.utils import CustomTestCase
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Test_Leaky_ReLUs(CustomTestCase):
@@ -116,14 +116,6 @@ def test_swish(self):
 
             self.assertAlmostEqual(computed_output.numpy(), good_output, places=5)
 
-    def test_mish(self):
-        for i in range(-5, 15):
-            good_output = i * np.tanh(np.math.log(1 + np.math.exp(i)))
-
-            computed_output = tl.act.mish(float(i))
-
-            self.assertAlmostEqual(computed_output.numpy(), good_output, places=5)
-
 
 if __name__ == '__main__':
diff --git a/tests/test_initializers.py b/tests/test_initializers.py
index a5c978251..df86fd834 100644
--- a/tests/test_initializers.py
+++ b/tests/test_initializers.py
@@ -4,13 +4,13 @@
 import os
 import unittest
 
-import numpy as np
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
+import numpy as np
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils import CustomTestCase
 
 
 class Test_Leaky_ReLUs(CustomTestCase):
diff --git a/tests/test_nlp.py b/tests/test_nlp.py
index a8ca6dd21..680eeb83b 100644
--- a/tests/test_nlp.py
+++ b/tests/test_nlp.py
@@ -4,15 +4,14 @@
 import os
 import unittest
 
-import nltk
-import tensorflow as tf
-from tensorflow.python.platform import gfile
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils import CustomTestCase
-
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tensorflow.python.platform import gfile
+from tests.utils import CustomTestCase
 
+import nltk
 nltk.download('punkt')
diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py
index 323329d63..15d4814c2 100644
--- a/tests/utils/__init__.py
+++ b/tests/utils/__init__.py
@@ -1,8 +1,9 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-from tests.utils.custom_layers import *
-from tests.utils.custom_networks import *
 from tests.utils.custom_testcase import *
 from tests.utils.list_py_files import *
 from tests.utils.timeout_utils import *
+
+from tests.utils.custom_layers import *
+from tests.utils.custom_networks import *
\ No newline at end of file
diff --git a/tests/utils/custom_layers/__init__.py b/tests/utils/custom_layers/__init__.py
index d9abe0d59..995a053ce 100644
--- a/tests/utils/custom_layers/__init__.py
+++ b/tests/utils/custom_layers/__init__.py
@@ -2,4 +2,4 @@
 # -*- coding: utf-8 -*-
 
 from tests.utils.custom_layers.basic_layers import *
-from tests.utils.custom_layers.inception_blocks import *
+from tests.utils.custom_layers.inception_blocks import *
\ No newline at end of file
diff --git a/tests/utils/custom_layers/basic_layers.py b/tests/utils/custom_layers/basic_layers.py
index 27ce5c1fc..83f320aec 100644
--- a/tests/utils/custom_layers/basic_layers.py
+++ b/tests/utils/custom_layers/basic_layers.py
@@ -2,7 +2,6 @@
 # -*- coding: utf-8 -*-
 
 import tensorflow as tf
-
 import tensorlayer as tl
 
 __all__ = [
@@ -62,9 +61,10 @@ def activation_module(layer, activation_fn, leaky_relu_alpha=0.2, name=None):
 
 
 def conv_module(
-    prev_layer, n_out_channel, filter_size, strides, padding, is_train=True, use_batchnorm=True, activation_fn=None,
-    conv_init=tl.initializers.random_uniform(), batch_norm_init=tl.initializers.truncated_normal(mean=1., stddev=0.02),
-    bias_init=tf.zeros_initializer(), name=None
+    prev_layer, n_out_channel, filter_size, strides, padding, is_train=True, use_batchnorm=True, activation_fn=None,
+    conv_init=tl.initializers.random_uniform(),
+    batch_norm_init=tl.initializers.truncated_normal(mean=1.,
+                                                     stddev=0.02), bias_init=tf.zeros_initializer(), name=None
 ):
 
     if activation_fn not in ["ReLU", "ReLU6", "Leaky_ReLU", "PReLU", "PReLU6", "PTReLU6", "CReLU", "ELU", "SELU",
@@ -98,8 +98,10 @@ def conv_module(
 
 
 def dense_module(
-    prev_layer, n_units, is_train, use_batchnorm=True, activation_fn=None, dense_init=tl.initializers.random_uniform(),
-    batch_norm_init=tl.initializers.truncated_normal(mean=1., stddev=0.02), bias_init=tf.zeros_initializer(), name=None
+    prev_layer, n_units, is_train, use_batchnorm=True, activation_fn=None,
+    dense_init=tl.initializers.random_uniform(),
+    batch_norm_init=tl.initializers.truncated_normal(mean=1.,
+                                                     stddev=0.02), bias_init=tf.zeros_initializer(), name=None
 ):
 
     if activation_fn not in ["ReLU", "ReLU6", "Leaky_ReLU", "PReLU", "PReLU6", "PTReLU6", "CReLU", "ELU", "SELU",
diff --git a/tests/utils/custom_layers/inception_blocks.py b/tests/utils/custom_layers/inception_blocks.py
index 90c38a9a3..89d2640d4 100644
--- a/tests/utils/custom_layers/inception_blocks.py
+++ b/tests/utils/custom_layers/inception_blocks.py
@@ -2,8 +2,8 @@
 # -*- coding: utf-8 -*-
 
 import tensorflow as tf
-
 import tensorlayer as tl
+
 from tests.utils.custom_layers.basic_layers import conv_module
 
 __all__ = [
diff --git a/tests/utils/custom_networks/__init__.py b/tests/utils/custom_networks/__init__.py
index e245d6ac1..81dd159ba 100644
--- a/tests/utils/custom_networks/__init__.py
+++ b/tests/utils/custom_networks/__init__.py
@@ -1,4 +1,4 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-from tests.utils.custom_networks.inceptionv4 import *
+from tests.utils.custom_networks.inceptionv4 import *
\ No newline at end of file
diff --git a/tests/utils/custom_networks/inceptionv4.py b/tests/utils/custom_networks/inceptionv4.py
index e9895eec0..bac2ae897 100644
--- a/tests/utils/custom_networks/inceptionv4.py
+++ b/tests/utils/custom_networks/inceptionv4.py
@@ -3,15 +3,20 @@
 
 import os
 
-import tensorflow as tf
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
 
+import tensorflow as tf
 import tensorlayer as tl
-from tests.utils.custom_layers.basic_layers import conv_module, dense_module
-from tests.utils.custom_layers.inception_blocks import (
-    block_inception_a, block_inception_b, block_inception_c, block_reduction_a, block_reduction_b
-)
 
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+from tests.utils.custom_layers.basic_layers import conv_module
+from tests.utils.custom_layers.basic_layers import dense_module
+
+from tests.utils.custom_layers.inception_blocks import block_inception_a
+from tests.utils.custom_layers.inception_blocks import block_inception_b
+from tests.utils.custom_layers.inception_blocks import block_inception_c
+
+from tests.utils.custom_layers.inception_blocks import block_reduction_a
+from tests.utils.custom_layers.inception_blocks import block_reduction_b
 
 __all__ = ['InceptionV4_Network']

From d3c269b6a07178178a0dd1d1e7f986933b3b0032 Mon Sep 17 00:00:00 2001
From: Eric Lai
Date: Tue, 11 May 2021 10:07:44 +0800 Subject: [PATCH 07/36] update --- .../basic_tutorials/tutorial_mnist_simple.py | 3 - .../tutorial_paddle_tensorlayer_mlp.py | 57 + tensorlayer/backend/ops/__init__.py | 2 + tensorlayer/backend/ops/dragon_backend.py | 18 +- tensorlayer/backend/ops/load_backend.py | 7 + tensorlayer/backend/ops/mindspore_backend.py | 27 +- tensorlayer/backend/ops/mindspore_nn.py | 5 + tensorlayer/backend/ops/paddle_backend.py | 972 +++++++++++++ tensorlayer/backend/ops/paddle_nn.py | 926 ++++++++++++ tensorlayer/backend/ops/tensorflow_backend.py | 18 +- tensorlayer/cost/__init__.py | 2 + tensorlayer/cost/paddle_cost.py | 2 + tensorlayer/dataflow/__init__.py | 3 + tensorlayer/initializers/__init__.py | 25 + .../initializers/load_initializers_backend.py | 16 + .../initializers/paddle_initializers.py | 178 +++ .../tensorflow_initializers.py} | 14 +- tensorlayer/layers/convolution/__init__.py | 18 +- tensorlayer/layers/convolution/quan_conv.py | 173 +++ .../layers/convolution/quan_conv_bn.py | 240 ++++ .../layers/convolution/simplified_conv.py | 12 +- .../layers/convolution/super_resolution.py | 214 +++ .../layers/convolution/ternary_conv.py | 166 +++ tensorlayer/layers/core/__init__.py | 11 +- ...re_tensorflow_dragon.py => core_dragon.py} | 27 +- tensorlayer/layers/core/core_mindspore.py | 6 +- tensorlayer/layers/core/core_paddle.py | 206 +++ tensorlayer/layers/core/core_tensorflow.py | 765 ++++++++++ tensorlayer/layers/inputs.py | 34 +- tensorlayer/layers/recurrent.py | 1256 ----------------- .../optimizers/load_optimizers_backend.py | 6 +- ...e_optimizer.py => mindspore_optimizers.py} | 0 tensorlayer/optimizers/paddle_optimizers.py | 44 + ..._optimizer.py => tensorflow_optimizers.py} | 0 34 files changed, 4111 insertions(+), 1342 deletions(-) create mode 100644 examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py create mode 100644 tensorlayer/backend/ops/paddle_backend.py create mode 100644 tensorlayer/backend/ops/paddle_nn.py create mode 100644 tensorlayer/cost/paddle_cost.py create mode 100644 tensorlayer/initializers/__init__.py create mode 100644 tensorlayer/initializers/load_initializers_backend.py create mode 100644 tensorlayer/initializers/paddle_initializers.py rename tensorlayer/{initializers.py => initializers/tensorflow_initializers.py} (96%) create mode 100644 tensorlayer/layers/convolution/quan_conv.py create mode 100644 tensorlayer/layers/convolution/quan_conv_bn.py create mode 100644 tensorlayer/layers/convolution/super_resolution.py create mode 100644 tensorlayer/layers/convolution/ternary_conv.py rename tensorlayer/layers/core/{core_tensorflow_dragon.py => core_dragon.py} (97%) create mode 100644 tensorlayer/layers/core/core_paddle.py create mode 100644 tensorlayer/layers/core/core_tensorflow.py rename tensorlayer/optimizers/{mindspore_optimizer.py => mindspore_optimizers.py} (100%) create mode 100644 tensorlayer/optimizers/paddle_optimizers.py rename tensorlayer/optimizers/{tensorflow_optimizer.py => tensorflow_optimizers.py} (100%) diff --git a/examples/basic_tutorials/tutorial_mnist_simple.py b/examples/basic_tutorials/tutorial_mnist_simple.py index e55f67e63..4d2bc7ccc 100644 --- a/examples/basic_tutorials/tutorial_mnist_simple.py +++ b/examples/basic_tutorials/tutorial_mnist_simple.py @@ -2,13 +2,10 @@ # -*- coding: utf-8 -*- import numpy as np -import time import os os.environ['TL_BACKEND'] = 'tensorflow' # os.environ['TL_BACKEND'] = 'mindspore' - -import tensorflow as tf import tensorlayer as tl from tensorlayer.layers import Module from 
tensorlayer.layers import Dense, Dropout diff --git a/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py b/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py new file mode 100644 index 000000000..e67477e71 --- /dev/null +++ b/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py @@ -0,0 +1,57 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- +import os +os.environ['TL_BACKEND'] = 'paddle' + +import paddle.nn.functional as F +from paddle.vision.transforms import Compose, Normalize +import paddle + +import tensorlayer as tl +from tensorlayer.layers import Module +from tensorlayer.layers import Dense, Flatten + +transform = Compose([Normalize(mean=[127.5], + std=[127.5], + data_format='CHW')]) +print('download training data and load training data') +train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform) +test_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform) +print('load finished') + +class MLP(Module): + def __init__(self): + super(MLP, self).__init__() + self.linear1 = Dense(n_units=120, in_channels=784, act=tl.ReLU) + self.linear2 = Dense(n_units=84, in_channels=120, act=tl.ReLU) + self.linear3 = Dense(n_units=10, in_channels=84) + self.flatten = Flatten() + + def forward(self, x): + x = self.flatten(x) + x = self.linear1(x) + x = self.linear2(x) + x = self.linear3(x) + return x + +train_loader = paddle.io.DataLoader(train_dataset, batch_size=64, shuffle=True) + +def train(model): + model.train() + epochs = 2 + optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.trainable_weights) + for epoch in range(epochs): + for batch_id, data in enumerate(train_loader()): + x_data = data[0] + y_data = data[1] + predicts = model(x_data) + loss = F.cross_entropy(predicts, y_data) + acc = paddle.metric.accuracy(predicts, y_data) + loss.backward() + if batch_id % 50 == 0: + print("epoch: {}, batch_id: {}, loss is: {}, acc is: {}".format(epoch, batch_id, loss.numpy(), acc.numpy())) + optim.step() + optim.clear_grad() +model = MLP() +train(model) + diff --git a/tensorlayer/backend/ops/__init__.py b/tensorlayer/backend/ops/__init__.py index 4d50f0f50..5b8a61b4e 100644 --- a/tensorlayer/backend/ops/__init__.py +++ b/tensorlayer/backend/ops/__init__.py @@ -121,4 +121,6 @@ from .load_backend import Minimum from .load_backend import Maximum from .load_backend import Meshgrid +from .load_backend import BatchToSpace +from .load_backend import DepthToSpace diff --git a/tensorlayer/backend/ops/dragon_backend.py b/tensorlayer/backend/ops/dragon_backend.py index b7c36c0dd..37e6e5aa5 100644 --- a/tensorlayer/backend/ops/dragon_backend.py +++ b/tensorlayer/backend/ops/dragon_backend.py @@ -1020,4 +1020,20 @@ def divide(x, y): raise NotImplementedError def identity(x): - raise NotImplementedError \ No newline at end of file + raise NotImplementedError + +class BatchToSpace(object): + def __init__(self, block_size, crops): + super(BatchToSpace, self).__init__() + pass + + def __call__(self, input_x): + raise NotImplementedError + + +class DepthToSpace(object): + def __init__(self, block_size, data_format='NHWC'): + pass + + def __call__(self, input): + raise NotImplementedError \ No newline at end of file diff --git a/tensorlayer/backend/ops/load_backend.py b/tensorlayer/backend/ops/load_backend.py index ed4e48062..4343507a2 100644 --- a/tensorlayer/backend/ops/load_backend.py +++ b/tensorlayer/backend/ops/load_backend.py @@ -70,5 +70,12 @@ import dragon as dg BACKEND_VERSION = dg.__version__ sys.stderr.write('Using Dragon backend.\n') + 
+elif BACKEND == 'paddle':
+    from .paddle_backend import *
+    from .paddle_nn import *
+    import paddle as pd
+    BACKEND_VERSION = pd.__version__
+    sys.stderr.write('Using Paddle backend.\n')
 else:
     raise NotImplementedError("This backend is not supported")
diff --git a/tensorlayer/backend/ops/mindspore_backend.py b/tensorlayer/backend/ops/mindspore_backend.py
index 8ebc6f120..d067be2f3 100644
--- a/tensorlayer/backend/ops/mindspore_backend.py
+++ b/tensorlayer/backend/ops/mindspore_backend.py
@@ -1211,4 +1211,29 @@ def divide(x, y):
     raise NotImplementedError

 def identity(x):
-    raise NotImplementedError
\ No newline at end of file
+    raise NotImplementedError
+
+class BatchToSpace(Cell):
+    def __init__(self, block_size, crops):
+        super(BatchToSpace, self).__init__()
+        self.batch_to_space = P.BatchToSpace(block_size=block_size, crops=crops)
+
+    def __call__(self, input_x):
+        return self.batch_to_space(input_x)
+
+class DepthToSpace(Cell):
+    def __init__(self, block_size, data_format='NHWC'):
+        super(DepthToSpace, self).__init__()
+        self.data_format = data_format
+        self.depth_to_space = P.DepthToSpace(block_size=block_size)
+
+    def __call__(self, input):
+        if self.data_format == 'NHWC':
+            input = nhwc_to_nchw(input)
+
+        output = self.depth_to_space(input)
+
+        if self.data_format == 'NHWC':
+            output = nchw_to_nhwc(output)
+
+        return output
\ No newline at end of file
diff --git a/tensorlayer/backend/ops/mindspore_nn.py b/tensorlayer/backend/ops/mindspore_nn.py
index 944b10cb4..3af443081 100644
--- a/tensorlayer/backend/ops/mindspore_nn.py
+++ b/tensorlayer/backend/ops/mindspore_nn.py
@@ -970,6 +970,8 @@ def construct(self, x, filters):
         output = self.conv2d_transpose(x, filters, (n, self.out_channel, h_out, w_out))
         output = self.squeeze(output)
+        if self.data_format == 'NWC':
+            output = nchw_to_nhwc(output)
         return output
@@ -1054,6 +1056,9 @@ def construct(self, x, filters):
         output = self.conv2d_transpose(x, filters, (n, self.out_channel, h_out, w_out))
+        if self.data_format == 'NHWC':
+            output = nchw_to_nhwc(output)
+
         return output
diff --git a/tensorlayer/backend/ops/paddle_backend.py b/tensorlayer/backend/ops/paddle_backend.py
new file mode 100644
index 000000000..e9b37c562
--- /dev/null
+++ b/tensorlayer/backend/ops/paddle_backend.py
@@ -0,0 +1,972 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, division, print_function
+import paddle as pd
+import paddle.nn as nn
+
+_dtypeDict = ["float16", "float32", "float64", "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64"]
+# TODO NotImplemented
+DType = None
+float16 = "float16"
+float32 = "float32"
+float64 = "float64"
+int8 = "int8"
+int16 = "int16"
+int32 = "int32"
+int64 = "int64"
+uint8 = "uint8"
+uint16 = "uint16"
+uint32 = "uint32"
+uint64 = "uint64"
+
+def _getter(init_fn, **kwargs):
+    """Return a named eager tensor."""
+    raise NotImplementedError
+
+
+def set_context(**kwargs):
+    raise Exception("Using the Paddle backend, you don't need to set a context")
+
+
+def get_tensor_shape(x):
+    return pd.shape(x)
+
+
+# initializers
+def zeros(shape, dtype="float32"):
+    """
+    Creates a tensor with all elements set to zero.
+
+    Parameters
+    ----------
+    shape : A list of integers
+        a tuple of integers, or a 1-D Tensor of type int32.
+    dtype : tensor
+        The DType of an element in the resulting Tensor
+
+    Returns
+    -------
+    A Tensor with all elements set to zero.
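+
+    Examples
+    --------
+    >>> # illustrative usage only; assumes the Paddle backend is active
+    >>> zeros((2, 3))                  # 2x3 float32 tensor of zeros
+    >>> zeros((4,), dtype="int32")     # 1-D int32 tensor of zeros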
+
+    """
+    return pd.zeros(shape=shape, dtype=dtype)
+
+
+def ones(shape, dtype="float32"):
+    """
+    Creates a tensor with all elements set to one.
+
+    Parameters
+    ----------
+    shape : A list of integers
+        a tuple of integers, or a 1-D Tensor of type int32.
+    dtype : tensor
+        The DType of an element in the resulting Tensor
+
+    Returns
+    -------
+    A Tensor with all elements set to one.
+
+    """
+    return pd.ones(shape=shape, dtype=dtype)
+
+
+def constant(value, shape, dtype="float32"):
+    """
+    Creates a constant tensor from a tensor-like object.
+
+    Parameters
+    ----------
+    value : list
+        A constant value (or list) of output type dtype.
+    dtype : tensor
+        The type of the elements of the resulting tensor.
+    shape : tuple
+        Optional dimensions of resulting tensor.
+
+    Returns
+    -------
+    A Constant Tensor.
+
+    """
+    # pd.full creates the constant tensor directly; nn.initializer has no
+    # lower-case `constant` callable.
+    return pd.full(shape=shape, fill_value=value, dtype=dtype)
+
+
+def random_uniform(shape, minval=0, maxval=None, dtype="float32", seed=None):
+    """
+    Outputs random values from a uniform distribution.
+
+    Parameters
+    ----------
+    shape : tuple
+        A 1-D integer Tensor or Python array. The shape of the output tensor.
+    minval : int
+        The lower bound on the range of random values to generate (inclusive). Defaults to 0.
+    maxval : int
+        The upper bound on the range of random values to generate (exclusive). Defaults to 1 if dtype is floating point.
+    dtype : tensor
+        The type of the output: float16, float32, float64, int32, or int64.
+    seed : int
+        Used to create a reproducible sequence of tensors across multiple calls.
+    Returns
+    -------
+    A tensor of the specified shape filled with random uniform values.
+
+    """
+    raise NotImplementedError
+
+
+def random_normal(shape, mean=0.0, stddev=1.0, dtype="float32", seed=None):
+    """
+    Outputs random values from a normal distribution.
+
+    Parameters
+    ----------
+    shape : tuple
+        A 1-D integer Tensor or Python array. The shape of the output tensor.
+    mean : float
+        The mean of the normal distribution
+    stddev : float
+        The standard deviation of the normal distribution.
+    dtype : tensor
+        The type of the output.
+    seed : A Python integer
+        Used to create a random seed for the distribution
+
+    Returns
+    -------
+    A tensor of the specified shape filled with random normal values.
+
+    """
+    raise NotImplementedError
+
+
+def truncated_normal(shape, mean=0.0, stddev=1.0, dtype="float32", seed=None):
+    """
+    Outputs random values from a truncated normal distribution.
+
+    Parameters
+    ----------
+    shape : tuple
+        A 1-D integer Tensor or Python array. The shape of the output tensor.
+    mean : float
+        The mean of the normal distribution
+    stddev : float
+        The standard deviation of the normal distribution.
+    dtype : tensor
+        The type of the output.
+    seed : A Python integer
+        Used to create a random seed for the distribution
+
+    Returns
+    -------
+    A tensor of the specified shape filled with random truncated normal values.
+
+    """
+    raise NotImplementedError
+
+
+def he_normal(shape, dtype, seed=None):
+    """
+    He normal initializer.
+
+    Parameters
+    ----------
+    seed : A Python integer.
+        Used to seed the random generator.
+    shape : tuple
+        A 1-D integer Tensor or Python array. The shape of the output tensor.
+    dtype : tensor
+        The type of the output.
+
+    Returns
+    -------
+    A tensor of the specified shape filled with he normal values.
+    """
+    # shape = shape[::-1]
+    raise NotImplementedError
+
+
+def Variable(initial_value, name, trainable=None):
+    """
+    Creates a new variable with value initial_value.
+ + Parameters + ---------- + initial_value : tensor + A Tensor, or Python object convertible to a Tensor + name : str + Optional name for the variable. Defaults to 'Variable' and gets uniquified automatically. + Returns + ------- + Variable + """ + raise NotImplementedError + + +class MatMul(object): + + def __init__(self): + pass + + def __call__(self, a, b): + return pd.matmul(x=a, y=b) + + +def matmul(a, b): + """ + Multiplies matrix a by matrix b, producing a * b. + + Parameters + ---------- + a : tensor + type float16, float32, float64, int32, complex64, complex128 and rank > 1. + b : tensor + with same type and rank as a. + + Returns + ------- + A Tensor of the same type as a and b + """ + raise NotImplementedError + + +def add(value, bias): + """ + Returns x + y element-wise. + + Parameters + ---------- + value : tensor. + Must be one of the following types: bfloat16, half, float32, float64, + uint8, int8, int16, int32, int64, complex64, complex128, string. + bias : tensor + Must have the same type as a + name : str + A name for the operation + + Returns + ------- + A Tensor. Has the same type as a. + """ + + raise NotImplementedError + + +def dtypes(dt): + """ + Data dtypes. + + Parameters + ---------- + dt : string + It could be 'uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16', + 'int32', 'int64', 'float16', 'float32', 'float64', 'DType'. + + Returns + ------- + Data dtypes + """ + raise NotImplementedError + + +class Maximum(object): + def __init__(self): + pass + + def __call__(self, x, y): + raise NotImplementedError + + +class Minimum(object): + def __init__(self): + pass + + def __call__(self, x, y): + raise NotImplementedError + + +def minimum(x, y): + """ + Returns the min of x and y (i.e. x < y ? x : y) element-wise. + + Parameters + ---------- + x : tensor. + Must be one of the following types: bfloat16, half, float32, float64, int32, int64. + y : A Tensor. + Must have the same type as x. + name : str + A name for the operation (optional). + + Returns + ------- + A Tensor. Has the same type as x + """ + raise NotImplementedError + + +class FlattenReshape(object): + + def __init__(self): + pass + + def __call__(self, inputs): + return pd.flatten(x=inputs, start_axis=1,stop_axis=-1) + + +class Reshape(object): + + def __init__(self, shape): + self.shape = shape + + def __call__(self, tensor): + raise NotImplementedError + + +def reshape(tensor, shape): + """ + Reshapes a tensor. + + Parameters + ---------- + tensor : tensor + A Tensor. + shape : tensor + Defines the shape of the output tensor. + Returns + ------- + A Tensor. Has the same type as tensor + """ + raise NotImplementedError + + +class Concat(object): + + def __init__(self, axis): + super(Concat, self).__init__() + self.axis = axis + + def __call__(self, values): + raise NotImplementedError + + +def concat(values, axis): + """ + Concatenates tensors along one dimension. + + Parameters + ---------- + values : list + A list of Tensor objects or a single Tensor + axis : int + 0-D int32 Tensor. Dimension along which to concatenate + Returns + ------- + A Tensor resulting from concatenation of the input tensors. + """ + raise NotImplementedError + + +def convert_to_tensor(value, dtype=None): + """ + Converts the given value to a Tensor. + + Parameters + ---------- + value : object + An object whose type has a registered Tensor conversion function. + dtype : optional + Optional element type for the returned tensor. If missing, the type is inferred from the type of value. 
+
+    Returns
+    -------
+    A Tensor based on value.
+    """
+    raise NotImplementedError
+
+
+def sqrt(x):
+    """
+    Computes square root of x element-wise.
+
+    Parameters
+    ----------
+    x : tensor
+        Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128.
+
+    Returns
+    -------
+    A Tensor. Has the same type as x.
+    """
+    raise NotImplementedError
+
+
+class ReduceSum(object):
+
+    def __init__(self, axis):
+        pass
+
+    def construct(self, input):
+        pass
+
+
+class ReduceMean(object):
+
+    def __init__(self, axis):
+        if axis == [1, 2]:
+            self.data_format = 'NHWC'
+        elif axis == [2, 3]:
+            self.data_format = 'NCHW'
+        else:
+            raise Exception("`data_format` should have one of the following values: [`channels_last`, `channels_first`]")
+
+    def __call__(self, inputs):
+        raise NotImplementedError
+
+
+def reduce_mean(input_tensor, axis=None):
+    """
+    Computes the mean of elements across dimensions of a tensor.
+
+    Parameters
+    ----------
+    input_tensor : tensor
+        The tensor to reduce. Should have numeric type.
+    axis : int
+        The dimensions to reduce. If None (the default), reduces all dimensions.
+        Must be in the range [-rank(input_tensor), rank(input_tensor)).
+    name : str
+        A name for the operation (optional).
+
+    Returns
+    -------
+    The reduced tensor.
+    """
+
+    raise NotImplementedError
+
+
+class ReduceMax(object):
+
+    def __init__(self, axis):
+        if axis == [1, 2]:
+            self.data_format = 'NHWC'
+        elif axis == [2, 3]:
+            self.data_format = 'NCHW'
+        else:
+            raise Exception("`data_format` should have one of the following values: [`channels_last`, `channels_first`]")
+
+    def __call__(self, inputs):
+        raise NotImplementedError
+
+
+def reduce_max(input_tensor, axis=None):
+    """
+    Computes the maximum of elements across dimensions of a tensor.
+
+    Parameters
+    ----------
+    input_tensor : tensor
+        The tensor to reduce. Should have real numeric type.
+    axis : int
+        The dimensions to reduce. If None (the default), reduces all dimensions.
+        Must be in the range [-rank(input_tensor), rank(input_tensor)).
+    name : str
+        A name for the operation (optional).
+
+    Returns
+    -------
+    The reduced tensor.
+    """
+
+    raise NotImplementedError
+
+
+def reduce_min(input_tensor, axis=None):
+    """
+    Computes the minimum of elements across dimensions of a tensor.
+
+    Parameters
+    ----------
+    input_tensor : tensor
+        The tensor to reduce. Should have real numeric type.
+    axis : int
+        The dimensions to reduce. If None (the default), reduces all dimensions.
+        Must be in the range [-rank(input_tensor), rank(input_tensor)).
+    name : str
+        A name for the operation (optional).
+
+    Returns
+    -------
+    The reduced tensor.
+    """
+    raise NotImplementedError
+
+class Pad(object):
+    def __init__(self, paddings, mode="REFLECT"):
+        if mode not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']:
+            raise Exception("Unsupported mode: {}".format(mode))
+        if mode == 'SYMMETRIC':
+            mode = 'EDGE'
+        self.paddings = paddings
+        self.mode = mode
+
+    def __call__(self, x):
+        raise NotImplementedError
+
+def pad(tensor, paddings, mode='CONSTANT', constant_values=0):
+    """
+    Pads a tensor.
+
+    Parameters
+    ----------
+    tensor : tensor
+        A Tensor.
+    paddings : tuple
+        A tuple of type int32.
+    mode : str
+        One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
+    constant_values : int
+        In "CONSTANT" mode, the scalar pad value to use. Must be same type as tensor.
+
+    Returns
+    -------
+    A Tensor. Has the same type as tensor.
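+
+    Examples
+    --------
+    >>> # intended semantics (mirrors tf.pad); the Paddle path is still a stub
+    >>> x = [[1, 2], [3, 4]]
+    >>> pad(x, [[1, 1], [2, 2]])       # zero-padded to shape (4, 6)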
+ """ + raise NotImplementedError + + +class Unstack(object): + + def __init__(self, axis, num=None): + self.axis = axis + self.num = num + + def __call__(self, values): + raise NotImplementedError + + +class Stack(object): + + def __init__(self, axis): + self.axis = axis + + def __call__(self, values): + raise NotImplementedError + + +def stack(values, axis=0): + """ + Stacks a list of rank-R tensors into one rank-(R+1) tensor. + + Parameters + ---------- + values : list + A list of Tensor objects with the same shape and type. + axis : int + An int. The axis to stack along. Defaults to the first dimension. + Negative values wrap around, so the valid range is [-(R+1), R+1). + + Returns + ------- + A stacked Tensor with the same type as values. + """ + raise NotImplementedError + + +class Meshgrid(object): + def __init__(self, indexing='xy'): + super(Meshgrid, self).__init__() + self.index = indexing + + def __call__(self, inputs): + pass + + +def meshgrid(x, y): + """ + Broadcasts parameters for evaluation on an N-D grid. + + Parameters + ---------- + x : tensor + Tensors with rank 1. + y : tensor + Tensors with rank 1. + + Returns + ------- + A list of N Tensors with rank N. + """ + + pass + + +def range(start, limit=None, delta=1, dtype=None): + """ + Creates a sequence of numbers. + + Parameters + ---------- + start : tensor + A 0-D Tensor (scalar). Acts as first entry in the range if limit is not None; + otherwise, acts as range limit and first entry defaults to 0. + limit : tensor + A 0-D Tensor (scalar). Upper limit of sequence, exclusive. If None, + defaults to the value of start while the first entry of the range defaults to 0. + delta : tensor + A 0-D Tensor (scalar). Number that increments start. Defaults to 1. + dtype : type + The type of the elements of the resulting tensor. + + Returns + ------- + An 1-D Tensor of type dtype. + """ + raise NotImplementedError + + +class ExpandDims(object): + + def __init__(self, axis): + pass + + def construct(self, input): + pass + + +def expand_dims(input, axis): + """ + Inserts a dimension of 1 into a tensor's shape. + + Parameters + ---------- + input : tensor + A Tensor. + axis : int + 0-D (scalar). Specifies the dimension index at which to expand the shape of input. + Must be in the range [-rank(input) - 1, rank(input)]. + + Returns + ------- + A Tensor with the same data as input, but its shape has an additional dimension of size 1 added. + """ + + raise NotImplementedError + + +class Tile(object): + + def __init__(self): + pass + + def __call__(self, input, multiples): + raise NotImplementedError + + +def tile(input, multiples): + """ + Constructs a tensor by tiling a given tensor. + + Parameters + ---------- + input : tensor + A Tensor. 1-D or higher. + multiples : tensor + Must be one of the following types: int32, int64. 1-D. + Length must be the same as the number of dimensions in input + + Returns + ------- + A Tensor. Has the same type as input. + """ + raise NotImplementedError + + +class Cast(object): + + def __init__(self, dtype): + pass + + def __call__(self, input): + pass + + +def cast(x, dtype): + """ + Casts a tensor to a new type. + + Parameters + ---------- + x : tensor + A Tensor or SparseTensor or IndexedSlices of numeric type. + It could be uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64. + dtype : dtpye + The destination type. The list of supported dtypes is the same as x + + Returns + ------- + A Tensor or SparseTensor or IndexedSlices with same shape as x and same type as dtype. 
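+
+    Examples
+    --------
+    >>> # intended semantics; Paddle itself offers pd.cast(x, 'int32')
+    >>> x = pd.to_tensor([1.8, 2.2])
+    >>> cast(x, dtype='int32')         # -> [1, 2] once implemented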
+    """
+    raise NotImplementedError
+
+
+class Transpose(object):
+
+    def __init__(self, perm, conjugate=False):
+        self.perm = perm
+        if conjugate:
+            raise Exception("The conjugate parameter is not supported")
+
+    def __call__(self, a):
+        raise NotImplementedError
+
+
+def transpose(a, perm=None, conjugate=False):
+    """
+    Transposes a.
+
+    Parameters
+    ----------
+    a : tensor
+        A Tensor.
+    perm : int
+        A permutation of the dimensions of a.
+    conjugate : bool
+        Setting it to True is mathematically equivalent to conj(transpose(input)).
+
+    Returns
+    -------
+    A transposed Tensor.
+    """
+
+    raise NotImplementedError
+
+
+def gather_nd(params, indices, batch_dims=0):
+    """
+    Gather slices from params into a Tensor with shape specified by indices.
+
+    Parameters
+    ----------
+    params : tensor
+        The tensor from which to gather values.
+    indices : tensor
+        Must be one of the following types: int32, int64. Index tensor.
+    batch_dims : int
+        An integer or a scalar 'Tensor'. The number of batch dimensions.
+
+    Returns
+    -------
+    A Tensor. Has the same type as params.
+    """
+
+    pass
+
+
+def clip_by_value(t, clip_value_min, clip_value_max):
+    """
+    Clips tensor values to a specified min and max.
+
+    Parameters
+    ----------
+    t : tensor
+        A Tensor or IndexedSlices
+    clip_value_min : tensor
+        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The minimum value to clip by
+    clip_value_max : tensor
+        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The maximum value to clip by
+
+    Returns
+    -------
+    A clipped Tensor or IndexedSlices.
+    """
+
+    pass
+
+
+def split(value, num_or_size_splits, axis=0, num=None):
+    """
+    Splits a tensor into sub tensors.
+
+    Parameters
+    ----------
+    value : tensor
+        The Tensor to split.
+    num_or_size_splits : list
+        Either an integer indicating the number of splits along split_dim or a 1-D integer Tensor or
+        Python list containing the sizes of each output tensor along split_dim.
+    axis : int
+        The dimension along which to split. Must be in the range [-rank(value), rank(value)). Defaults to 0.
+    num : int
+        used to specify the number of outputs when it cannot be inferred from the shape of size_splits.
+
+    Returns
+    -------
+    Tensor objects resulting from splitting value.
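+
+    Examples
+    --------
+    >>> # intended semantics; Paddle itself offers pd.split(x, 3, axis=0)
+    >>> x = pd.ones([6, 2])
+    >>> split(x, 3, axis=0)            # -> three tensors of shape [2, 2] once implemented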
+    """
+    pass
+
+
+def floor(x):
+    raise NotImplementedError
+
+
+def gather(params, indices):
+    raise NotImplementedError
+
+
+def linspace(start, stop, num):
+    raise NotImplementedError
+
+
+def slice(inputs, starts, sizes):
+    raise NotImplementedError
+
+
+def add_n(inputs):
+    raise NotImplementedError
+
+
+class OneHot(object):
+
+    def __init__(self, axis=-1, depth=1, on_value=1.0, off_value=0.0, dtype="float32"):
+        self.depth = depth
+        self.dtype = dtype
+
+    def __call__(self, indices):
+        raise NotImplementedError
+
+
+class L2Normalize(object):
+
+    def __init__(self, axis=None, epsilon=1e-12):
+        super(L2Normalize, self).__init__()
+        pass
+
+    def __call__(self, input, *args, **kwargs):
+        pass
+
+
+class EmbeddingLookup(object):
+
+    def __init__(self, max_norm=None):
+        self.max_norm = max_norm
+
+    def __call__(self, params, ids, *args, **kwargs):
+        pass
+
+
+class NCELoss(object):
+
+    def __init__(self, num_true=1, sampled_values=None, remove_accidental_hits=False):
+        super(NCELoss, self).__init__()
+
+    def __call__(self, weights, biases, labels, inputs, num_sampled, num_classes):
+        pass
+
+
+class Not_equal(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, x, y):
+        pass
+
+
+class Count_nonzero(object):
+
+    def __init__(self, keepdims=None, dtype="int64"):
+        pass
+
+    def __call__(self, *args, **kwargs):
+        pass
+
+
+class Resize:
+
+    def __init__(self, scale, method, antialias=False, data_format='channels_last', ksize=None):
+        if method not in ['nearest', 'linear', 'bilinear']:
+            raise Exception('Current resize does not support this method.')
+        if method == 'bilinear':
+            method = 'linear'
+        self.method = method
+        self.antialias = antialias
+        self.scale = scale
+        if data_format != 'channels_last':
+            raise Exception("UpSampling2d resize_images only supports channels_last")
+
+    def __call__(self, inputs):
+        raise NotImplementedError
+
+
+def resize(inputs, output_size, method, antialias):
+    raise NotImplementedError
+
+
+class ZeroPadding1D(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, padding):
+        raise NotImplementedError
+
+
+class ZeroPadding2D(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, padding):
+        raise NotImplementedError
+
+
+class ZeroPadding3D(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, padding):
+        raise NotImplementedError
+
+
+class Sign(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, x):
+        raise NotImplementedError
+
+def ceil(x):
+    raise NotImplementedError
+
+def multiply(x, y):
+    raise NotImplementedError
+
+def divide(x, y):
+    raise NotImplementedError
+
+def identity(x):
+    raise NotImplementedError
+
+class BatchToSpace(object):
+    def __init__(self, block_size, crops):
+        super(BatchToSpace, self).__init__()
+        pass
+
+    def __call__(self, input_x):
+        raise NotImplementedError
+
+
+class DepthToSpace(object):
+    def __init__(self, block_size, data_format='NHWC'):
+        pass
+
+    def __call__(self, input):
+        raise NotImplementedError
\ No newline at end of file
diff --git a/tensorlayer/backend/ops/paddle_nn.py b/tensorlayer/backend/ops/paddle_nn.py
new file mode 100644
index 000000000..47d9dd062
--- /dev/null
+++ b/tensorlayer/backend/ops/paddle_nn.py
@@ -0,0 +1,926 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import paddle as pd
+import paddle.nn.functional as F
+
+def padding_format(padding):
+    """
+    Checks that the padding format is valid and returns its canonical form.
+
+    Parameters
+    ----------
+    padding : str
+        Must be one of the following:"same", "SAME", "VALID", "valid"
+
+    Returns
+    -------
+    str "SAME" or "VALID"
+    """
+
+    if padding in ["SAME", "same"]:
+        padding = "SAME"
+    elif padding in ["VALID", "valid"]:
+        padding = "VALID"
+    elif padding is None:
+        padding = None
+    else:
+        raise Exception("Unsupported padding: " + str(padding))
+    return padding
+
+
+def preprocess_1d_format(data_format, padding):
+    """
+    Checks that the 1-D data format and padding are valid and returns their canonical forms.
+
+    Parameters
+    ----------
+    data_format : str
+        Must be one of the following:"channels_last","NWC","NCW","channels_first"
+    padding : str
+        Must be one of the following:"same","valid","SAME","VALID"
+
+    Returns
+    -------
+    str "NWC" or "NCW" and "SAME" or "VALID"
+    """
+
+    if data_format in ["channels_last", "NWC"]:
+        data_format = "NWC"
+    elif data_format in ["channels_first", "NCW"]:
+        data_format = "NCW"
+    elif data_format is None:
+        data_format = None
+    else:
+        raise Exception("Unsupported data format: " + str(data_format))
+    padding = padding_format(padding)
+    return data_format, padding
+
+
+def preprocess_2d_format(data_format, padding):
+    """
+    Checks that the 2-D data format and padding are valid and returns their canonical forms.
+
+    Parameters
+    ----------
+    data_format : str
+        Must be one of the following:"channels_last","NHWC","NCHW","channels_first"
+    padding : str
+        Must be one of the following:"same","valid","SAME","VALID"
+
+    Returns
+    -------
+    str "NHWC" or "NCHW" and "SAME" or "VALID"
+    """
+
+    if data_format in ["channels_last", "NHWC", "nhwc"]:
+        data_format = "NHWC"
+    elif data_format in ["channels_first", "NCHW", "nchw"]:
+        data_format = "NCHW"
+    elif data_format is None:
+        data_format = None
+    else:
+        raise Exception("Unsupported data format: " + str(data_format))
+    padding = padding_format(padding)
+    return data_format, padding
+
+
+def preprocess_3d_format(data_format, padding):
+    """
+    Checks that the 3-D data format and padding are valid and returns their canonical forms.
+
+    Parameters
+    ----------
+    data_format : str
+        Must be one of the following:"channels_last","NDHWC","NCDHW","channels_first"
+    padding : str
+        Must be one of the following:"same","valid","SAME","VALID"
+
+    Returns
+    -------
+    str "NDHWC" or "NCDHW" and "SAME" or "VALID"
+    """
+
+    if data_format in ['channels_last', 'NDHWC']:
+        data_format = 'NDHWC'
+    elif data_format in ['channels_first', 'NCDHW']:
+        data_format = 'NCDHW'
+    elif data_format is None:
+        data_format = None
+    else:
+        raise Exception("Unsupported data format: " + str(data_format))
+    padding = padding_format(padding)
+    return data_format, padding
+
+
+def nchw_to_nhwc(x):
+    """
+    Channels first to channels last
+
+    Parameters
+    ----------
+    x : tensor
+        channels first tensor data
+
+    Returns
+    -------
+    channels last tensor data
+    """
+
+    pass
+
+
+def nhwc_to_nchw(x):
+    """
+    Channels last to channels first
+
+    Parameters
+    ----------
+    x : tensor
+        channels last tensor data
+
+    Returns
+    -------
+    channels first tensor data
+    """
+
+    pass
+
+
+class ReLU(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, x):
+        return F.relu(x)
+
+
+def relu(x):
+    """
+    Computes rectified linear: max(features, 0).
+
+    Parameters
+    ----------
+    x : tensor
+        Must be one of the following types: float32, float64, int32, uint8, int16,
+        int8, int64, bfloat16, uint16, half, uint32, uint64, qint8.
+
+    Returns
+    -------
+    A Tensor. Has the same type as features.
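+
+    Examples
+    --------
+    >>> # quick check of the Paddle implementation (assumes Paddle >= 2.0)
+    >>> relu(pd.to_tensor([-1.0, 2.0]))    # -> [0., 2.]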
+ """ + return F.relu(x) + + +class ReLU6(object): + + def __init__(self): + pass + + def __call__(self, x): + return F.relu6(x) + + +def relu6(x): + """ + Computes Rectified Linear 6: min(max(features, 0), 6). + + Parameters + ---------- + x : tensor + Must be one of the following types: float32, float64, int32, uint8, int16, + int8, int64, bfloat16, uint16, half, uint32, uint64, qint8. + + Returns + ------- + A Tensor with the same type as features. + """ + return F.relu6(x) + + +class LeakyReLU(object): + + def __init__(self, alpha=0.2): + self.alpha = alpha + + def __call__(self, x): + return F.leaky_relu(x, negative_slope=self.alpha) + + +def leaky_relu(x): + """ + Compute the Leaky ReLU activation function. + + Parameters + ---------- + x : tensor + representing preactivation values. Must be one of the following types: + float16, float32, float64, int32, int64. + + Returns + ------- + The activation value. + """ + + return F.leaky_relu(x) + + +class Softplus(object): + + def __init__(self): + pass + + def __call__(self, x): + return F.softplus(x) + + +def softplus(x): + """ + Computes softplus: log(exp(features) + 1). + + Parameters + ---------- + x : tensor + Must be one of the following types: half, bfloat16, float32, float64. + + Returns + ------- + A Tensor. Has the same type as features. + """ + + return F.softplus(x) + + +class Tanh(object): + + def __init__(self): + pass + + def __call__(self, x): + return F.tanh(x) + + +def tanh(x): + """ + Computes hyperbolic tangent of x element-wise. + + Parameters + ---------- + x : tensor + Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128. + + Returns + ------- + A Tensor. Has the same type as x. + """ + + return F.tanh(x) + + +class Sigmoid(object): + + def __init__(self): + pass + + def __call__(self, x): + return F.sigmoid(x) + + +def sigmoid(x): + """ + Computes sigmoid of x element-wise. + + Parameters + ---------- + x : tensor + A Tensor with type float16, float32, float64, complex64, or complex128. + + Returns + ------- + A Tensor with the same type as x. + """ + return F.sigmoid(x) + + +class Softmax(object): + + def __init__(self): + pass + + def __call__(self, x): + return F.softmax(x) + + +def softmax(logits, axis=-1): + """ + Computes softmax activations. + + Parameters + ---------- + logits : tensor + Must be one of the following types: half, float32, float64. + axis : int + The dimension softmax would be performed on. The default is -1 which indicates the last dimension. + + Returns + ------- + A Tensor. Has the same type and shape as logits. + """ + return F.softmax(logits, axis=axis) + + +class Dropout(object): + + def __init__(self, keep, seed=1): + self.keep = 1 - keep + self.seed = seed + + def __call__(self, inputs): + raise NotImplementedError + + +class BiasAdd(object): + """ + Adds bias to value. + + Parameters + ---------- + x : tensor + A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128. + bias : tensor + Must be the same type as value unless value is a quantized type, + in which case a different quantized type may be used. + Returns + ------- + A Tensor with the same type as value. + """ + + def __init__(self, data_format='NHWC'): + self.data_format = data_format + + def __call__(self, x, bias): + return pd.add(x, bias) + + +def bias_add(x, bias): + """ + Adds bias to value. + + Parameters + ---------- + x : tensor + A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128. 
+    bias : tensor
+        Must be the same type as value unless value is a quantized type,
+        in which case a different quantized type may be used.
+    data_format : A string.
+        'N...C' and 'NC...' are supported.
+    name : str
+        A name for the operation (optional).
+    Returns
+    -------
+    A Tensor with the same type as value.
+    """
+    raise NotImplementedError
+
+
+class Conv1D(object):
+    pass
+    # raise NotImplementedError
+
+
+def conv1d(input, filters, stride, padding, data_format='NWC', dilations=None, name=None):
+    """
+    Computes a 1-D convolution given 3-D input and filter tensors.
+
+    Parameters
+    ----------
+    input : tensor
+        A 3D Tensor. Must be of type float16, float32, or float64
+    filters : tensor
+        A 3D Tensor. Must have the same type as input.
+    stride : int or list
+        An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step.
+    padding : string
+        'SAME' or 'VALID'
+    data_format : string
+        An optional string from "NWC", "NCW". Defaults to "NWC", the data is stored in the order of
+        [batch, in_width, in_channels]. The "NCW" format stores data as [batch, in_channels, in_width].
+    dilations : int or list
+        An int or list of ints that has length 1 or 3 which defaults to 1.
+        The dilation factor for each dimension of input. If set to k > 1,
+        there will be k-1 skipped cells between each filter element on that dimension.
+        Dilations in the batch and depth dimensions must be 1.
+    name : string
+        A name for the operation (optional).
+    Returns
+    -------
+    A Tensor. Has the same type as input.
+    """
+
+    pass
+
+
+class Conv2D(object):
+
+    def __init__(self, strides, padding, data_format='NHWC', dilations=None, out_channel=None, k_size=None):
+        self.data_format, self.padding = preprocess_2d_format(data_format, padding)
+        self.ksize = k_size[0]
+        if self.data_format == 'NHWC':
+            self.dg_stride = strides[1]
+            self.dg_dilation = dilations[1]
+        elif self.data_format == 'NCHW':
+            self.dg_stride = strides[2]
+            self.dg_dilation = dilations[2]
+
+    def __call__(self, inputs, filters):
+        raise NotImplementedError
+
+
+def conv2d(input, filters, strides, padding, data_format='NCHW', dilations=None):
+    """
+    Computes a 2-D convolution given 4-D input and filters tensors.
+
+    Parameters
+    ----------
+    input : tensor
+        Must be one of the following types: half, bfloat16, float32, float64. A 4-D tensor.
+        The dimension order is interpreted according to the value of data_format, see below for details.
+    filters : tensor
+        Must have the same type as input. A 4-D tensor of shape [filter_height, filter_width, in_channels, out_channels]
+    strides : int or list
+        The stride of the sliding window for each dimension of input. If a single value is given it is replicated in the H and W dimension.
+        By default the N and C dimensions are set to 1. The dimension order is determined by the value of data_format, see below for details.
+    padding : string
+        "SAME" or "VALID"
+    data_format : string
+        "NHWC", "NCHW". Defaults to "NCHW".
+    dilations : list of ints
+        list of ints that has length 1, 2 or 4, defaults to 1. The dilation factor for each dimension of input.
+
+    Returns
+    -------
+    A Tensor. Has the same type as input.
+    """
+    raise NotImplementedError
+
+
+class Conv3D(object):
+    pass
+    # raise NotImplementedError
+
+
+def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None, name=None):
+    """
+    Computes a 3-D convolution given 5-D input and filters tensors.
+ + Parameters + ---------- + input : tensor + Must be one of the following types: half, bfloat16, float32, float64. + Shape [batch, in_depth, in_height, in_width, in_channels]. + filters : tensor + Must have the same type as input. Shape [filter_depth, filter_height, filter_width, in_channels, out_channels]. + in_channels must match between input and filters. + strides : list of ints + A list of ints that has length >= 5. 1-D tensor of length 5. + The stride of the sliding window for each dimension of input. + Must have strides[0] = strides[4] = 1. + padding : string + A string from: "SAME", "VALID". The type of padding algorithm to use. + data_format : string + An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data. + With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. + Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. + dilations : list of ints + Defaults to [1, 1, 1, 1, 1]. 1-D tensor of length 5. The dilation factor for each dimension of input. + If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. + The dimension order is determined by the value of data_format, see above for details. + Dilations in the batch and depth dimensions must be 1. + name : string + A name for the operation (optional). + + Returns + ------- + A Tensor. Has the same type as input. + """ + + raise NotImplementedError + + +def lrn(inputs, depth_radius, bias, alpha, beta): + """ + Local Response Normalization. + + Parameters + ---------- + inputs : tensor + Must be one of the following types: half, bfloat16, float32. 4-D. + depth_radius : int + Defaults to 5. 0-D. Half-width of the 1-D normalization window. + bias : float + Defaults to 1. An offset (usually positive to avoid dividing by 0). + alpha : float + Defaults to 1. A scale factor, usually positive. + beta : float + Defaults to 0.5. An exponent. + + Returns + ------- + A Tensor. Has the same type as input. + """ + pass + + +def moments(x, axes, shift=None, keepdims=False): + """ + Calculates the mean and variance of x. + + Parameters + ---------- + x : tensor + A Tensor + axes : ints + Axes along which to compute mean and variance. + shift : int + Not used in the current implementation. + keepdims : bool + produce moments with the same dimensionality as the input. + + Returns + ------- + Two Tensor objects: mean and variance. + """ + + pass + + +class MaxPool(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + self.ksize = ksize + self.strides = strides + + def __call__(self, inputs): + raise NotImplementedError + + +def max_pool(input, ksize, strides, padding, data_format=None): + """ + Performs the max pooling on the input. + + Parameters + ---------- + input : tensor + Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] if data_format does not start + with "NC" (default), or [batch_size, num_channels] + input_spatial_shape if data_format starts with "NC". + Pooling happens over the spatial dimensions only. + ksize : int or list of ints + An int or list of ints that has length 1, N or N+2. + The size of the window for each dimension of the input tensor. + strides : int or list of ints + An int or list of ints that has length 1, N or N+2. 
+ The stride of the sliding window for each dimension of the input tensor. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + + Returns + ------- + A Tensor of format specified by data_format. The max pooled output tensor. + """ + pass + + +class AvgPool(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + self.filter_size = ksize + self.strides = strides + + def __call__(self, inputs): + raise NotImplementedError + + +def avg_pool(input, ksize, strides, padding): + """ + Performs the avg pooling on the input. + + Parameters + ---------- + input : tensor + Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] + if data_format does not start with "NC" (default), or [batch_size, num_channels] + input_spatial_shape + if data_format starts with "NC". Pooling happens over the spatial dimensions only. + ksize : int or list of ints + An int or list of ints that has length 1, N or N+2. + The size of the window for each dimension of the input tensor. + strides : int or list of ints + An int or list of ints that has length 1, N or N+2. + The stride of the sliding window for each dimension of the input tensor. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + + Returns + ------- + A Tensor of format specified by data_format. The average pooled output tensor. + """ + pass + + +def max_pool3d(input, ksize, strides, padding, data_format=None, name=None): + """ + Performs the max pooling on the input. + + Parameters + ---------- + input : tensor + A 5-D Tensor of the format specified by data_format. + ksize : int or list of ints + An int or list of ints that has length 1, 3 or 5. + The size of the window for each dimension of the input tensor. + strides : int or list of ints + An int or list of ints that has length 1, 3 or 5. + The stride of the sliding window for each dimension of the input tensor. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + data_format : string + "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data. + With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. + Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. + name : string + A name for the operation (optional). + + Returns + ------- + A Tensor of format specified by data_format. The max pooled output tensor. + """ + pass + + +def avg_pool3d(input, ksize, strides, padding, data_format=None, name=None): + """ + Performs the average pooling on the input. + + Parameters + ---------- + input : tensor + A 5-D Tensor of shape [batch, height, width, channels] and type float32, float64, qint8, quint8, or qint32. + ksize : int or list of ints + An int or list of ints that has length 1, 3 or 5. The size of the window for each dimension of the input tensor. + strides : int or list of ints + An int or list of ints that has length 1, 3 or 5. + The stride of the sliding window for each dimension of the input tensor. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + data_format : string + 'NDHWC' and 'NCDHW' are supported. 
+ name : string + Optional name for the operation. + + Returns + ------- + A Tensor with the same type as value. The average pooled output tensor. + """ + pass + + +def pool(input, window_shape, pooling_type, strides=None, padding='VALID', data_format=None, dilations=None, name=None): + """ + Performs an N-D pooling operation. + + Parameters + ---------- + input : tensor + Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] + if data_format does not start with "NC" (default), or [batch_size, num_channels] + input_spatial_shape + if data_format starts with "NC". Pooling happens over the spatial dimensions only. + window_shape : int + Sequence of N ints >= 1. + pooling_type : string + Specifies pooling operation, must be "AVG" or "MAX". + strides : ints + Sequence of N ints >= 1. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1. + padding : string + The padding algorithm, must be "SAME" or "VALID". Defaults to "SAME". + See the "returns" section of tf.ops.convolution for details. + data_format : string + Specifies whether the channel dimension of the input and output is the last dimension (default, or if data_format does not start with "NC"), + or the second dimension (if data_format starts with "NC"). + For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". + For N=3, the valid values are "NDHWC" (default) and "NCDHW". + dilations : list of ints + Dilation rate. List of N ints >= 1. Defaults to [1]*N. If any value of dilation_rate is > 1, then all values of strides must be 1. + name : string + Optional. Name of the op. + + Returns + ------- + Tensor of rank N+2, of shape [batch_size] + output_spatial_shape + [num_channels] + """ + pass + + +class DepthwiseConv2d(object): + + def __init__(self, strides, padding, data_format=None, dilations=None, ksize=None, channel_multiplier=1): + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + self.stride = strides + self.dilations = dilations + + def __call__(self, input, filter): + raise NotImplementedError("Not implemented depthwiseconv2d") + + +def depthwise_conv2d(input, filter, strides, padding, data_format=None, dilations=None, name=None): + """ + Depthwise 2-D convolution. + + Parameters + ---------- + input : tensor + 4-D with shape according to data_format. + filter : tensor + 4-D with shape [filter_height, filter_width, in_channels, channel_multiplier]. + strides : list + 1-D of size 4. The stride of the sliding window for each dimension of input. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + data_format : string + The data format for input. Either "NHWC" (default) or "NCHW". + dilations : list + 1-D of size 2. The dilation rate in which we sample input values across the height and width dimensions in atrous convolution. + If it is greater than 1, then all values of strides must be 1. + name : string + A name for this operation (optional). + + Returns + ------- + A 4-D Tensor with shape according to data_format. + E.g., for "NHWC" format, shape is [batch, out_height, out_width, in_channels * channel_multiplier]. 
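+
+    Examples
+    --------
+    >>> # intended shape behavior; the Paddle path is still a stub
+    >>> # input (1, 8, 8, 3), filter (3, 3, 3, 2), 'SAME' padding, strides [1, 1, 1, 1]
+    >>> # -> output (1, 8, 8, 6)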
+ """ + + pass + +class Conv1d_transpose(object): + + def __init__( + self, strides, padding, data_format='NWC', dilations=None, out_channel=None, k_size=None, in_channels=None + ): + self.strides = strides + self.dilations = dilations + self.data_format, self.padding = preprocess_1d_format(data_format, padding) + + def __call__(self, input, filters): + raise NotImplementedError + + +def conv1d_transpose( + input, filters, output_shape, strides, padding='SAME', data_format='NWC', dilations=None, name=None +): + """ + The transpose of conv1d. + + Parameters + ---------- + input : tensor + A 3-D Tensor of type float and shape [batch, in_width, in_channels] + for NWC data format or [batch, in_channels, in_width] for NCW data format. + filters : tensor + A 3-D Tensor with the same type as value and shape [filter_width, output_channels, in_channels]. + filter's in_channels dimension must match that of value. + output_shape : tensor + A 1-D Tensor, containing three elements, representing the output shape of the deconvolution op. + strides : list + An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + data_format : string + 'NWC' and 'NCW' are supported. + dilations : list + An int or list of ints that has length 1 or 3 which defaults to 1. + The dilation factor for each dimension of input. If set to k > 1, + there will be k-1 skipped cells between each filter element on that dimension. + Dilations in the batch and depth dimensions must be 1. + name : string + Optional name for the returned tensor. + + Returns + ------- + A Tensor with the same type as value. + """ + pass + + +class Conv2d_transpose(object): + + def __init__( + self, strides, padding, data_format='NHWC', dilations=None, name=None, out_channel=None, k_size=None, + in_channels=None + ): + self.strides = strides + self.dilations = dilations + self.name = name + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + + def __call__(self, input, filters): + raise NotImplementedError + + +def conv2d_transpose( + input, filters, output_shape, strides, padding='SAME', data_format='NHWC', dilations=None, name=None +): + """ + The transpose of conv2d. + + Parameters + ---------- + input : tensor + A 4-D Tensor of type float and shape [batch, height, width, in_channels] + for NHWC data format or [batch, in_channels, height, width] for NCHW data format. + filters : tensor + A 4-D Tensor with the same type as input and shape [height, width, + output_channels, in_channels]. filter's in_channels dimension must match that of input. + output_shape : tensor + A 1-D Tensor representing the output shape of the deconvolution op. + strides : list + An int or list of ints that has length 1, 2 or 4. The stride of the sliding window for each dimension of input. + If a single value is given it is replicated in the H and W dimension. + By default the N and C dimensions are set to 0. + The dimension order is determined by the value of data_format, see below for details. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + data_format : string + 'NHWC' and 'NCHW' are supported. + dilations : list + An int or list of ints that has length 1, 2 or 4, defaults to 1. + name : string + Optional name for the returned tensor. + + Returns + ------- + A Tensor with the same type as input. 
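+
+    Examples
+    --------
+    >>> # intended shape behavior; the Paddle path is still a stub
+    >>> # input (1, 4, 4, 8), filters (3, 3, 16, 8), output_shape (1, 8, 8, 16),
+    >>> # strides [1, 2, 2, 1], 'SAME' -> output of shape (1, 8, 8, 16)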
+ """ + pass + + +class Conv3d_transpose(object): + + def __init__( + self, strides, padding, data_format='NDHWC', dilations=None, name=None, out_channel=None, k_size=None, + in_channels=None + ): + self.strides = strides + self.dilations = dilations + self.name = name + self.out_channel = out_channel + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + + def __call__(self, input, filters): + raise NotImplementedError + + +def conv3d_transpose( + input, filters, output_shape, strides, padding='SAME', data_format='NDHWC', dilations=None, name=None +): + """ + The transpose of conv3d. + + Parameters + ---------- + input : tensor + A 5-D Tensor of type float and shape [batch, height, width, in_channels] for + NHWC data format or [batch, in_channels, height, width] for NCHW data format. + filters : tensor + A 5-D Tensor with the same type as value and shape [height, width, output_channels, in_channels]. + filter's in_channels dimension must match that of value. + output_shape : tensor + A 1-D Tensor representing the output shape of the deconvolution op. + strides : list + An int or list of ints that has length 1, 3 or 5. + padding : string + 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. + data_format : string + 'NDHWC' and 'NCDHW' are supported. + dilations : list of ints + An int or list of ints that has length 1, 3 or 5, defaults to 1. + name : string + Optional name for the returned tensor. + + Returns + ------- + A Tensor with the same type as value. + """ + + pass + + +class BatchNorm(object): + + def __init__(self): + pass + + def __call__(self, *args, **kwargs): + pass diff --git a/tensorlayer/backend/ops/tensorflow_backend.py b/tensorlayer/backend/ops/tensorflow_backend.py index d22513042..74df53def 100644 --- a/tensorlayer/backend/ops/tensorflow_backend.py +++ b/tensorlayer/backend/ops/tensorflow_backend.py @@ -1002,4 +1002,20 @@ def divide(x, y): return tf.divide(x, y) def identity(x): - return tf.identity(x) \ No newline at end of file + return tf.identity(x) + +class BatchToSpace(object): + def __init__(self, block_size, crops): + self.bolock_size = block_size + self.crops = crops + + def __call__(self, input_x): + return tf.batch_to_space(input=input_x, block_shape=self.bolock_size, crops=self.crops) + +class DepthToSpace(object): + def __init__(self, block_size, data_format='NHWC'): + self.block_size = block_size + self.data_format = data_format + + def __call__(self, input): + return tf.nn.depth_to_space(input, block_size=self.block_size, data_format=self.data_format) diff --git a/tensorlayer/cost/__init__.py b/tensorlayer/cost/__init__.py index eb18aae26..76fa2c6de 100644 --- a/tensorlayer/cost/__init__.py +++ b/tensorlayer/cost/__init__.py @@ -9,5 +9,7 @@ from .mindspore_cost import * elif BACKEND == 'dragon': pass +elif BACKEND == 'paddle': + from .mindspore_cost import * else: raise NotImplementedError("This backend is not supported") diff --git a/tensorlayer/cost/paddle_cost.py b/tensorlayer/cost/paddle_cost.py new file mode 100644 index 000000000..a07f0de0d --- /dev/null +++ b/tensorlayer/cost/paddle_cost.py @@ -0,0 +1,2 @@ +#! 
/usr/bin/python +# -*- coding: utf-8 -*- \ No newline at end of file diff --git a/tensorlayer/dataflow/__init__.py b/tensorlayer/dataflow/__init__.py index f0c6c284d..d26b3226e 100644 --- a/tensorlayer/dataflow/__init__.py +++ b/tensorlayer/dataflow/__init__.py @@ -15,6 +15,9 @@ elif BACKEND == 'dragon': pass +elif BACKEND == 'paddle': + pass + else: raise NotImplementedError("This backend is not supported") diff --git a/tensorlayer/initializers/__init__.py b/tensorlayer/initializers/__init__.py new file mode 100644 index 000000000..80557bd31 --- /dev/null +++ b/tensorlayer/initializers/__init__.py @@ -0,0 +1,25 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +# __all__ = [ +# 'Initializer', 'Zeros', 'Ones', 'Constant', 'RandomUniform', 'RandomNormal', 'TruncatedNormal', +# 'deconv2d_bilinear_upsampling_initializer', 'He_Normal' +# ] + +from .load_initializers_backend import Zeros +from .load_initializers_backend import Ones +from .load_initializers_backend import Constant +from .load_initializers_backend import RandomUniform +from .load_initializers_backend import RandomNormal +from .load_initializers_backend import TruncatedNormal +from .load_initializers_backend import deconv2d_bilinear_upsampling_initializer +from .load_initializers_backend import HeNormal + +# Alias +zeros = Zeros +ones = Ones +constant = Constant +random_uniform = RandomUniform +random_normal = RandomNormal +truncated_normal = TruncatedNormal +he_normal = HeNormal \ No newline at end of file diff --git a/tensorlayer/initializers/load_initializers_backend.py b/tensorlayer/initializers/load_initializers_backend.py new file mode 100644 index 000000000..fc65bab8d --- /dev/null +++ b/tensorlayer/initializers/load_initializers_backend.py @@ -0,0 +1,16 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +from __future__ import absolute_import, division, print_function +from tensorlayer.backend.ops.load_backend import BACKEND + +if BACKEND == 'tensorflow': + from .tensorflow_initializers import * +elif BACKEND == 'mindspore': + from .tensorflow_initializers import * +elif BACKEND == 'dragon': + from .tensorflow_initializers import * +elif BACKEND == 'paddle': + from .paddle_initializers import * +else: + raise NotImplementedError("This backend is not supported") diff --git a/tensorlayer/initializers/paddle_initializers.py b/tensorlayer/initializers/paddle_initializers.py new file mode 100644 index 000000000..22ffa7a55 --- /dev/null +++ b/tensorlayer/initializers/paddle_initializers.py @@ -0,0 +1,178 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +from paddle.fluid.initializer import ConstantInitializer +from paddle.fluid.initializer import UniformInitializer +from paddle.fluid.initializer import NormalInitializer +from paddle.fluid.initializer import TruncatedNormalInitializer +from paddle.fluid.initializer import MSRAInitializer +import paddle + +__all__ = [ + 'Zeros', 'Ones', 'Constant', 'RandomUniform', 'RandomNormal', 'TruncatedNormal', + 'deconv2d_bilinear_upsampling_initializer', 'HeNormal' +] + + +class Zeros(ConstantInitializer): + """Initializer that generates tensors initialized to 0. + """ + + def __init__(self): + super(Zeros, self).__init__(value=0.0, force_cpu=False) + + +class Ones(object): + """Initializer that generates tensors initialized to 1. 
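+
+    Examples
+    --------
+    An illustrative sketch (``tl.initializers.ones`` is the alias exported by
+    ``tensorlayer/initializers/__init__.py``):
+
+    >>> init = tl.initializers.ones()
+    >>> ones_tensor = init(shape=[2, 3], dtype='float32')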
+ """ + + def __init__(self): + # super(Ones, self).__init__(value=1.0, force_cpu=False) + pass + + def __call__(self, shape, dtype): + return paddle.ones(shape=shape, dtype=dtype) + + +class Constant(ConstantInitializer): + """Initializer that generates tensors initialized to a constant value. + + Parameters + ---------- + value : A python scalar or a numpy array. + The assigned value. + + """ + + def __init__(self, value=0.0): + if value is None: + raise ValueError("value must not be none.") + super(Constant, self).__init__(value=value, force_cpu=False) + self.value = value + + def get_config(self): + return {"value": self.value} + + +class RandomUniform(UniformInitializer): + """Initializer that generates tensors with a uniform distribution. + + Parameters + ---------- + minval : A python scalar or a scalar tensor. + Lower bound of the range of random values to generate. + maxval : A python scalar or a scalar tensor. + Upper bound of the range of random values to generate. + seed : A Python integer. + Used to seed the random generator. + + """ + + def __init__(self, minval=-0.05, maxval=0.05, seed=0): + assert minval is not None, 'low should not be None' + assert maxval is not None, 'high should not be None' + assert maxval >= minval, 'high should greater or equal than low' + super(RandomUniform, self).__init__( + low=minval, high=maxval, seed=seed, diag_num=0, diag_step=0, diag_val=1.0) + self.minval = minval + self.maxval = maxval + self.seed = seed + + def get_config(self): + return {"minval": self.minval, "maxval": self.maxval, "seed": self.seed} + + +class RandomNormal(NormalInitializer): + """Initializer that generates tensors with a normal distribution. + + Parameters + ---------- + mean : A python scalar or a scalar tensor. + Mean of the random values to generate. + stddev : A python scalar or a scalar tensor. + Standard deviation of the random values to generate. + seed : A Python integer. + Used to seed the random generator. + """ + + def __init__(self, mean=0.0, stddev=0.05, seed=0): + assert mean is not None, 'mean should not be None' + assert stddev is not None, 'std should not be None' + super(RandomNormal, self).__init__(loc=mean, scale=stddev, seed=seed) + self.mean = mean + self.stddev = stddev + self.seed = seed + + def get_config(self): + return {"mean": self.mean, "stddev": self.stddev, "seed": self.seed} + + +class TruncatedNormal(TruncatedNormalInitializer): + """Initializer that generates a truncated normal distribution. + + These values are similar to values from a `RandomNormal` + except that values more than two standard deviations from the mean + are discarded and re-drawn. This is the recommended initializer for + neural network weights and filters. + + + Parameters + ---------- + mean : A python scalar or a scalar tensor. + Mean of the random values to generate. + stddev : A python scalar or a scalar tensor. + Standard deviation of the andom values to generate. + seed : A Python integer. + Used to seed the random generator. + """ + + def __init__(self, mean=0.0, stddev=0.05, seed=0): + assert mean is not None, 'mean should not be None' + assert stddev is not None, 'std should not be None' + super(TruncatedNormal, self).__init__(loc=mean, scale=stddev, seed=seed) + self.mean = mean + self.stddev = stddev + self.seed = seed + + def get_config(self): + return {"mean": self.mean, "stddev": self.stddev, "seed": self.seed} + + +class HeNormal(MSRAInitializer): + """He normal initializer. + + Parameters + ---------- + seed : A Python integer. 
+        Used to seed the random generator.
+
+    """
+
+    def __init__(self, seed=0):
+        super(HeNormal, self).__init__(
+            uniform=False, fan_in=None, seed=seed)
+        self.seed = seed
+
+    def get_config(self):
+        return {"seed": self.seed}
+
+
+def deconv2d_bilinear_upsampling_initializer(shape):
+    """Returns the initializer that can be passed to DeConv2dLayer for initializing the
+    weights in correspondence to channel-wise bilinear up-sampling.
+    Used in segmentation approaches such as [FCN](https://arxiv.org/abs/1605.06211)
+
+    Parameters
+    ----------
+    shape : tuple of int
+        The shape of the filters, [height, width, output_channels, in_channels].
+        It must match the shape passed to DeConv2dLayer.
+
+    Returns
+    -------
+    ``tf.constant_initializer``
+        A constant initializer with weights set to correspond to per channel bilinear upsampling
+        when passed as W_init in DeConv2dLayer
+
+    """
+    raise NotImplementedError
diff --git a/tensorlayer/initializers.py b/tensorlayer/initializers/tensorflow_initializers.py
similarity index 96%
rename from tensorlayer/initializers.py
rename to tensorlayer/initializers/tensorflow_initializers.py
index b7b972115..8865216af 100644
--- a/tensorlayer/initializers.py
+++ b/tensorlayer/initializers/tensorflow_initializers.py
@@ -6,7 +6,7 @@
 
 __all__ = [
     'Initializer', 'Zeros', 'Ones', 'Constant', 'RandomUniform', 'RandomNormal', 'TruncatedNormal',
-    'deconv2d_bilinear_upsampling_initializer', 'He_Normal'
+    'deconv2d_bilinear_upsampling_initializer', 'HeNormal'
 ]
 
@@ -175,7 +175,7 @@ def get_config(self):
         return {"mean": self.mean, "stddev": self.stddev, "seed": self.seed}
 
 
-class He_Normal(Initializer):
+class HeNormal(Initializer):
     """He normal initializer.
 
     Parameters
@@ -241,13 +241,3 @@ def deconv2d_bilinear_upsampling_initializer(shape):
 
     # assign numpy array to constant_initalizer and pass to get_variable
     return Constant(value=weights)
-
-
-# Alias
-zeros = Zeros
-ones = Ones
-constant = Constant
-random_uniform = RandomUniform
-random_normal = RandomNormal
-truncated_normal = TruncatedNormal
-he_normal = He_Normal
diff --git a/tensorlayer/layers/convolution/__init__.py b/tensorlayer/layers/convolution/__init__.py
index 4e1630ea1..668736e80 100644
--- a/tensorlayer/layers/convolution/__init__.py
+++ b/tensorlayer/layers/convolution/__init__.py
@@ -16,13 +16,13 @@
 # from .expert_conv import *
 # from .expert_deconv import *
 # from .group_conv import *
-# from .quan_conv import *
-# from .quan_conv_bn import *
+from .quan_conv import *
+from .quan_conv_bn import *
 # from .separable_conv import *
 from .simplified_conv import *
 # from .simplified_deconv import *
-# from .super_resolution import *
-# from .ternary_conv import *
+from .super_resolution import *
+from .ternary_conv import *
 
 __all__ = [
@@ -71,13 +71,13 @@
     # 'SeparableConv2d',
 
     # subpixel
-    # 'SubpixelConv1d',
-    # 'SubpixelConv2d',
+    'SubpixelConv1d',
+    'SubpixelConv2d',
     # ternary
-    # 'TernaryConv2d',
+    'TernaryConv2d',
     #quan_conv
-    # 'QuanConv2d',
-    # 'QuanConv2dWithBN',
+    'QuanConv2d',
+    'QuanConv2dWithBN',
 ]
diff --git a/tensorlayer/layers/convolution/quan_conv.py b/tensorlayer/layers/convolution/quan_conv.py
new file mode 100644
index 000000000..87f4f5256
--- /dev/null
+++ b/tensorlayer/layers/convolution/quan_conv.py
@@ -0,0 +1,173 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import tensorlayer as tl
+from tensorlayer import logging
+from tensorlayer.layers.core import Module
+from tensorlayer.layers.utils import (quantize_active_overflow, quantize_weight_overflow)
+
+__all__ = ['QuanConv2d']
+
+
+class QuanConv2d(Module):
+    """The :class:`QuanConv2d` class is a quantized convolutional layer without BN, whose weights are 'bitW' bits and the output of the previous layer
+    is 'bitA' bits during inference.
+    Note that the bias vector is not binarized.
+
+    Parameters
+    ----------
+    n_filter : int
+        The number of filters.
+    filter_size : tuple of int
+        The filter size (height, width).
+    strides : tuple of int
+        The sliding window strides of corresponding input dimensions.
+        It must be in the same order as the ``shape`` parameter.
+    act : activation function
+        The activation function of this layer.
+    padding : str
+        The padding algorithm type: "SAME" or "VALID".
+    bitW : int
+        The number of bits of this layer's parameters.
+    bitA : int
+        The number of bits of the output of the previous layer.
+    use_gemm : boolean
+        If True, use gemm instead of ``tf.matmul`` for inference.
+        TODO: support gemm
+    data_format : str
+        "channels_last" (NHWC, default) or "channels_first" (NCHW).
+    dilation_rate : tuple of int
+        Specifying the dilation rate to use for dilated convolution.
+    W_init : initializer
+        The initializer for the weight matrix.
+    b_init : initializer or None
+        The initializer for the bias vector. If None, skip biases.
+    in_channels : int
+        The number of in channels.
+    name : None or str
+        A unique layer name.
+
+    Examples
+    ---------
+    With TensorLayer
+
+    >>> net = tl.layers.Input([8, 12, 12, 64], name='input')
+    >>> quanconv2d = tl.layers.QuanConv2d(
+    ...     n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='quancnn2d'
+    ... )(net)
+    >>> print(quanconv2d)
+    >>> output shape : (8, 12, 12, 32)
+
+    """
+
+    def __init__(
+        self,
+        bitW=8,
+        bitA=8,
+        n_filter=32,
+        filter_size=(3, 3),
+        strides=(1, 1),
+        act=None,
+        padding='SAME',
+        use_gemm=False,
+        data_format="channels_last",
+        dilation_rate=(1, 1),
+        W_init=tl.initializers.truncated_normal(stddev=0.02),
+        b_init=tl.initializers.constant(value=0.0),
+        in_channels=None,
+        name=None  # 'quan_cnn2d',
+    ):
+        super().__init__(name, act=act)
+        self.bitW = bitW
+        self.bitA = bitA
+        self.n_filter = n_filter
+        self.filter_size = filter_size
+        self.strides = self._strides = strides
+        self.padding = padding
+        self.use_gemm = use_gemm
+        self.data_format = data_format
+        self.dilation_rate = self._dilation_rate = dilation_rate
+        self.W_init = W_init
+        self.b_init = b_init
+        self.in_channels = in_channels
+
+        if self.in_channels:
+            self.build(None)
+            self._built = True
+
+        logging.info(
+            "QuanConv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % (
+                self.name, n_filter, str(filter_size), str(strides), padding,
+                self.act.__class__.__name__ if self.act is not None else 'No Activation'
+            )
+        )
+
+        if self.use_gemm:
+            raise Exception("TODO. The current version uses tf.matmul for inference.")
+
+        if len(self.strides) != 2:
+            raise ValueError("len(strides) should be 2.")
+
+    def __repr__(self):
+        actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
+        s = (
+            '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}'
+            ', strides={strides}, padding={padding}'
+        )
+        if self.dilation_rate != (1, ) * len(self.dilation_rate):
+            s += ', dilation={dilation_rate}'
+        if self.b_init is None:
+            s += ', bias=False'
+        s += (', ' + actstr)
+        if self.name is not None:
+            s += ', name=\'{name}\''
+        s += ')'
+        return s.format(classname=self.__class__.__name__, **self.__dict__)
+
+    def build(self, inputs_shape):
+        if self.data_format == 'channels_last':
+            self.data_format = 'NHWC'
+            if self.in_channels is None:
+                self.in_channels = inputs_shape[-1]
+            self._strides = [1, self._strides[0], self._strides[1], 1]
+            self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], 1]
+        elif self.data_format == 'channels_first':
+            self.data_format = 'NCHW'
+            if self.in_channels is None:
+                self.in_channels = inputs_shape[1]
+            self._strides = [1, 1, self._strides[0], self._strides[1]]
+            self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1]]
+        else:
+            raise Exception("data_format should be either channels_last or channels_first")
+
+        self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, self.n_filter)
+
+        self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init)
+        if self.b_init:
+            self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init)
+            self.bias_add = tl.ops.BiasAdd(data_format=self.data_format)
+
+        self.conv2d = tl.ops.Conv2D(strides=self._strides, padding=self.padding, data_format=self.data_format,
+                                    dilations=self._dilation_rate)
+
+    def forward(self, inputs):
+        if self._forward_state == False:
+            if self._built == False:
+                self.build(tl.get_tensor_shape(inputs))
+                self._built = True
+            self._forward_state = True
+
+        # quantize the input activations and the weights before the convolution
+        inputs = quantize_active_overflow(inputs, self.bitA)
+
+        W_ = quantize_weight_overflow(self.W, self.bitW)
+
+        outputs = self.conv2d(inputs, W_)
+
+        if self.b_init:
+            outputs = self.bias_add(outputs, self.b)
+        if self.act:
+            outputs = self.act(outputs)
+
+        return outputs
diff --git a/tensorlayer/layers/convolution/quan_conv_bn.py b/tensorlayer/layers/convolution/quan_conv_bn.py
new file mode 100644
index 000000000..335742b15
--- /dev/null
+++ b/tensorlayer/layers/convolution/quan_conv_bn.py
@@ -0,0 +1,240 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import tensorflow as tf
+import tensorlayer as tl
+from tensorlayer import logging
+from tensorlayer.layers.core import Module
+from tensorflow.python.training import moving_averages
+from tensorlayer.layers.utils import (quantize_active_overflow, quantize_weight_overflow)
+from tensorlayer.backend import BACKEND
+
+__all__ = ['QuanConv2dWithBN']
+
+
+class QuanConv2dWithBN(Module):
+    """The :class:`QuanConv2dWithBN` class is a quantized convolutional layer with BN, whose weights are 'bitW' bits and the output of the previous layer
+    is 'bitA' bits during inference.
+
+    Note that the bias vector is kept unchanged.
+
+    Parameters
+    ----------
+    n_filter : int
+        The number of filters.
+    filter_size : tuple of int
+        The filter size (height, width).
+    strides : tuple of int
+        The sliding window strides of corresponding input dimensions.
+        It must be in the same order as the ``shape`` parameter.
+    padding : str
+        The padding algorithm type: "SAME" or "VALID".
+    act : activation function
+        The activation function of this layer.
+    decay : float
+        A decay factor for `ExponentialMovingAverage`.
+        A large value is suggested for a large dataset.
+    epsilon : float
+        Epsilon.
+    is_train : boolean
+        Whether the layer is being used for training or inference.
+    beta_init : initializer or None
+        The initializer for initializing beta, if None, skip beta.
+        Usually you should not skip beta unless you know what you are doing.
+    gamma_init : initializer or None
+        The initializer for initializing gamma, if None, skip gamma.
+    bitW : int
+        The number of bits of this layer's parameters.
+    bitA : int
+        The number of bits of the output of the previous layer.
+    use_gemm : boolean
+        If True, use gemm instead of ``tf.matmul`` for inference. (TODO).
+    W_init : initializer
+        The initializer for the weight matrix.
+    W_init_args : dictionary
+        The arguments for the weight matrix initializer.
+    data_format : str
+        "NHWC" or "NCHW", default is "NHWC".
+    dilation_rate : tuple of int
+        Specifying the dilation rate to use for dilated convolution.
+    in_channels : int
+        The number of in channels.
+    name : str
+        A unique layer name.
+
+    Examples
+    ---------
+    >>> import tensorlayer as tl
+    >>> net = tl.layers.Input([50, 256, 256, 3])
+    >>> layer = tl.layers.QuanConv2dWithBN(n_filter=64, filter_size=(5,5),strides=(1,1),padding='SAME',name='qcnnbn1')
+    >>> print(layer)
+    >>> net = tl.layers.QuanConv2dWithBN(n_filter=64, filter_size=(5,5),strides=(1,1),padding='SAME',name='qcnnbn1')(net)
+    >>> print(net)
+    """
+
+    def __init__(
+        self,
+        n_filter=32,
+        filter_size=(3, 3),
+        strides=(1, 1),
+        padding='SAME',
+        act=None,
+        decay=0.9,
+        epsilon=1e-5,
+        is_train=False,
+        gamma_init=tl.initializers.truncated_normal(stddev=0.02),
+        beta_init=tl.initializers.truncated_normal(stddev=0.02),
+        bitW=8,
+        bitA=8,
+        use_gemm=False,
+        W_init=tl.initializers.truncated_normal(stddev=0.02),
+        W_init_args=None,
+        data_format="channels_last",
+        dilation_rate=(1, 1),
+        in_channels=None,
+        name='quan_cnn2d_bn',
+    ):
+        super(QuanConv2dWithBN, self).__init__(act=act, name=name)
+        self.n_filter = n_filter
+        self.filter_size = filter_size
+        self.strides = strides
+        self.padding = padding
+        self.decay = decay
+        self.epsilon = epsilon
+        self.is_train = is_train
+        self.gamma_init = gamma_init
+        self.beta_init = beta_init
+        self.bitW = bitW
+        self.bitA = bitA
+        self.use_gemm = use_gemm
+        self.W_init = W_init
+        self.W_init_args = W_init_args
+        self.data_format = data_format
+        self.dilation_rate = dilation_rate
+        self.in_channels = in_channels
+        logging.info(
+            "QuanConv2dWithBN %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s " % (
+                self.name, n_filter, filter_size, str(strides), padding,
+                self.act.__class__.__name__ if self.act is not None else 'No Activation'
+            )
+        )
+
+        if BACKEND == 'mindspore':
+            raise NotImplementedError("MindSpore backend does not implement this method")
+
+        if self.in_channels:
+            self.build(None)
+            self._built = True
+
+        if use_gemm:
+            raise Exception("TODO. The current version uses tf.matmul for inference.")
+
+        if len(strides) != 2:
+            raise ValueError("len(strides) should be 2.")
+
+    def __repr__(self):
+        actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
+        s = (
+            '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}'
+            ', strides={strides}, padding={padding}' + ', ' + actstr
+        )
+        if self.dilation_rate != (1, ) * len(self.dilation_rate):
+            s += ', dilation={dilation_rate}'
+        if self.name is not None:
+            s += ', name=\'{name}\''
+        s += ')'
+        return s.format(classname=self.__class__.__name__, **self.__dict__)
+
+    def build(self, inputs_shape):
+        if self.data_format == 'channels_last':
+            self.data_format = 'NHWC'
+            if self.in_channels is None:
+                self.in_channels = inputs_shape[-1]
+            self._strides = [1, self.strides[0], self.strides[1], 1]
+            self._dilation_rate = [1, self.dilation_rate[0], self.dilation_rate[1], 1]
+        elif self.data_format == 'channels_first':
+            self.data_format = 'NCHW'
+            if self.in_channels is None:
+                self.in_channels = inputs_shape[1]
+            self._strides = [1, 1, self.strides[0], self.strides[1]]
+            self._dilation_rate = [1, 1, self.dilation_rate[0], self.dilation_rate[1]]
+        else:
+            raise Exception("data_format should be either channels_last or channels_first")
+
+        self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, self.n_filter)
+        self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init)
+
+        para_bn_shape = (self.n_filter, )
+        if self.gamma_init:
+            self.scale_para = self._get_weights(
+                "scale_para", shape=para_bn_shape, init=self.gamma_init, trainable=self.is_train
+            )
+        else:
+            self.scale_para = None
+
+        if self.beta_init:
+            self.offset_para = self._get_weights(
+                "offset_para", shape=para_bn_shape, init=self.beta_init, trainable=self.is_train
+            )
+        else:
+            self.offset_para = None
+
+        self.moving_mean = self._get_weights(
+            "moving_mean", shape=para_bn_shape, init=tl.initializers.constant(1.0), trainable=False
+        )
+        self.moving_variance = self._get_weights(
+            "moving_variance", shape=para_bn_shape, init=tl.initializers.constant(1.0), trainable=False
+        )
+
+    def forward(self, inputs):
+        if self._forward_state == False:
+            if self._built == False:
+                self.build(tl.get_tensor_shape(inputs))
+                self._built = True
+            self._forward_state = True
+
+        x = inputs
+        inputs = quantize_active_overflow(inputs, self.bitA)  # Do not remove
+        outputs = tf.nn.conv2d(
+            input=x, filters=self.W, strides=self._strides, padding=self.padding, data_format=self.data_format,
+            dilations=self._dilation_rate, name=self.name
+        )
+
+        mean, variance = tf.nn.moments(outputs, axes=list(range(len(outputs.get_shape()) - 1)))
+
+        update_moving_mean = moving_averages.assign_moving_average(
+            self.moving_mean, mean, self.decay, zero_debias=False
+        )  # if zero_debias=True, has bias
+        update_moving_variance = moving_averages.assign_moving_average(
+            self.moving_variance, variance, self.decay, zero_debias=False
+        )  # if zero_debias=True, has bias
+
+        if self.is_train:
+            mean, var = self.mean_var_with_update(update_moving_mean, update_moving_variance, mean, variance)
+        else:
+            mean, var = self.moving_mean, self.moving_variance
+
+        w_fold = self._w_fold(self.W, self.scale_para, var, self.epsilon)
+
+        W_ = quantize_weight_overflow(w_fold, self.bitW)
+
+        conv_fold = tf.nn.conv2d(inputs, W_, strides=self.strides, padding=self.padding, data_format=self.data_format)
+
+        if self.beta_init:
+            bias_fold = self._bias_fold(self.offset_para, self.scale_para, mean, var, self.epsilon)
+            conv_fold = tf.nn.bias_add(conv_fold, bias_fold, name='bn_bias_add')
+
+        if self.act:
+            conv_fold = self.act(conv_fold)
+
+        return conv_fold
+
+    def mean_var_with_update(self, update_moving_mean, update_moving_variance, mean, variance):
+        with tf.control_dependencies([update_moving_mean, update_moving_variance]):
+            return tf.identity(mean), tf.identity(variance)
+
+    def _w_fold(self, w, gama, var, epsilon):
+        return tf.compat.v1.div(tf.multiply(gama, w), tf.sqrt(var + epsilon))
+
+    def _bias_fold(self, beta, gama, mean, var, epsilon):
+        return tf.subtract(beta, tf.compat.v1.div(tf.multiply(gama, mean), tf.sqrt(var + epsilon)))
\ No newline at end of file
diff --git a/tensorlayer/layers/convolution/simplified_conv.py b/tensorlayer/layers/convolution/simplified_conv.py
index 161034f51..e78dfbd8a 100644
--- a/tensorlayer/layers/convolution/simplified_conv.py
+++ b/tensorlayer/layers/convolution/simplified_conv.py
@@ -149,11 +149,9 @@ def forward(self, inputs):
         outputs = self.conv1d(inputs, self.W)
 
         if self.b_init_flag:
-            outputs = tl.ops.bias_add(outputs, self.b, data_format=self.data_format)
+            outputs = self.bias_add(outputs, self.b)
         if self.act_init_flag:
             outputs = self.act(outputs)
-        if tl.BACKEND == 'mindspore' and self.data_format == 'NWC':
-            outputs = tl.nchw_to_nhwc(outputs)
         return outputs
@@ -445,7 +443,7 @@ def forward(self, inputs):
         outputs = self.conv3d(inputs, self.W)
 
         if self.b_init_flag:
-            outputs = tl.ops.bias_add(outputs, self.b, data_format=self.data_format)
+            outputs = self.bias_add(outputs, self.b)
         if self.act_init_flag:
             outputs = self.act(outputs)
         return outputs
@@ -594,8 +592,6 @@ def forward(self, inputs):
             outputs = self.bias_add(outputs, self.b)
         if self.act_init_flag:
             outputs = self.act(outputs)
-        if tl.BACKEND == 'mindspore' and self.data_format == 'NWC':
-            outputs = tl.nchw_to_nhwc(outputs)
         return outputs
@@ -742,8 +738,6 @@ def forward(self, inputs):
             outputs = self.bias_add(outputs, self.b)
         if self.act_init_flag:
             outputs = self.act(outputs)
-        if tl.BACKEND == 'mindspore' and self.data_format == 'NHWC':
-            outputs = tl.nchw_to_nhwc(outputs)
         return outputs
@@ -896,6 +890,4 @@ def forward(self, inputs):
             outputs = self.bias_add(outputs, self.b)
         if self.act_init_flag:
             outputs = self.act(outputs)
-        if tl.BACKEND == 'mindspore' and self.data_format == 'NDHWC':
-            outputs = tl.nchw_to_nhwc(outputs)
         return outputs
diff --git a/tensorlayer/layers/convolution/super_resolution.py b/tensorlayer/layers/convolution/super_resolution.py
new file mode 100644
index 000000000..102ef52e2
--- /dev/null
+++ b/tensorlayer/layers/convolution/super_resolution.py
@@ -0,0 +1,214 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import tensorlayer as tl
+from tensorlayer import logging
+from tensorlayer.layers.core import Module
+
+__all__ = [
+    'SubpixelConv1d',
+    'SubpixelConv2d',
+]
+
+
+class SubpixelConv1d(Module):
+    """It is a 1D sub-pixel up-sampling layer.
+
+    Calls a backend op that directly implements this functionality.
+    We assume the input has dim (batch, width, r).
+
+    Parameters
+    ------------
+    scale : int
+        The up-scaling ratio, a wrong setting will lead to a dimension size error.
+    act : activation function
+        The activation function of this layer.
+    in_channels : int
+        The number of in channels.
+    name : str
+        A unique layer name.
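+
+    Notes
+    -----
+    Implemented by transposing the input to (channels, width, batch), applying a
+    batch-to-space op with block size ``scale`` (which moves a factor of ``scale``
+    from the channel axis into the width axis), and transposing back.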
+
+    Examples
+    ----------
+    With TensorLayer
+
+    >>> net = tl.layers.Input([8, 25, 32], name='input')
+    >>> subpixelconv1d = tl.layers.SubpixelConv1d(scale=2, name='subpixelconv1d')(net)
+    >>> print(subpixelconv1d)
+    >>> output shape : (8, 50, 16)
+
+    References
+    -----------
+    `Audio Super Resolution Implementation `__.
+
+    """
+
+    def __init__(
+        self,
+        scale=2,
+        act=None,
+        in_channels=None,
+        name=None  # 'subpixel_conv1d'
+    ):
+        super().__init__(name, act=act)
+        self.scale = scale
+        self.in_channels = in_channels
+        # self.out_channels = int(self.in_channels / self.scale)
+
+        if self.in_channels is not None:
+            self.build(None)
+            self._built = True
+
+        logging.info(
+            "SubpixelConv1d %s: scale: %d act: %s" %
+            (self.name, scale, self.act.__class__.__name__ if self.act is not None else 'No Activation')
+        )
+
+    def __repr__(self):
+        actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
+        s = ('{classname}(in_channels={in_channels}, out_channels={out_channels}')
+        s += (', ' + actstr)
+        if self.name is not None:
+            s += ', name=\'{name}\''
+        s += ')'
+        return s.format(classname=self.__class__.__name__, **self.__dict__)
+
+    def build(self, inputs_shape):
+        if inputs_shape is not None:
+            self.in_channels = inputs_shape[-1]
+        self.out_channels = int(self.in_channels / self.scale)
+        self.transpose = tl.ops.Transpose(perm=[2, 1, 0])
+        self.batch_to_space = tl.ops.BatchToSpace(block_size=[self.scale], crops=[[0, 0]])
+
+    def forward(self, inputs):
+        if self._forward_state == False:
+            if self._built == False:
+                self.build(tl.get_tensor_shape(inputs))
+                self._built = True
+            self._forward_state = True
+
+        outputs = self._PS(inputs)
+        if self.act is not None:
+            outputs = self.act(outputs)
+        return outputs
+
+    def _PS(self, I):
+        X = self.transpose(I)  # (r, w, b)
+        X = self.batch_to_space(X)  # (1, r*w, b)
+        X = self.transpose(X)
+        return X
+
+
+class SubpixelConv2d(Module):
+    """It is a 2D sub-pixel up-sampling layer, usually used
+    for super-resolution applications, see `SRGAN `__ for example.
+
+    Parameters
+    ------------
+    scale : int
+        The up-scaling ratio, a wrong setting will lead to a dimension size error.
+    n_out_channels : int or None
+        The number of output channels.
+        - If None, automatically set n_out_channels == the number of input channels / (scale x scale).
+        - The number of input channels == (scale x scale) x the number of output channels.
+    act : activation function
+        The activation function of this layer.
+    in_channels : int
+        The number of in channels.
+    name : str
+        A unique layer name.
+
+    Examples
+    ---------
+    With TensorLayer
+
+    >>> # the examples here show how to set n_out_channels.
+    >>> net = tl.layers.Input([2, 16, 16, 4], name='input1')
+    >>> subpixelconv2d = tl.layers.SubpixelConv2d(scale=2, n_out_channels=1, name='subpixel_conv2d1')(net)
+    >>> print(subpixelconv2d)
+    >>> output shape : (2, 32, 32, 1)
+
+    >>> net = tl.layers.Input([2, 16, 16, 4*10], name='input2')
+    >>> subpixelconv2d = tl.layers.SubpixelConv2d(scale=2, n_out_channels=10, name='subpixel_conv2d2')(net)
+    >>> print(subpixelconv2d)
+    >>> output shape : (2, 32, 32, 10)
+
+    >>> net = tl.layers.Input([2, 16, 16, 25*10], name='input3')
+    >>> subpixelconv2d = tl.layers.SubpixelConv2d(scale=5, n_out_channels=10, name='subpixel_conv2d3')(net)
+    >>> print(subpixelconv2d)
+    >>> output shape : (2, 80, 80, 10)
+
+    References
+    ------------
+    - `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network `__
+
+    """
+
+    # github/Tetrachrome/subpixel  https://github.com/Tetrachrome/subpixel/blob/master/subpixel.py
+    def __init__(
+        self,
+        scale=2,
+        n_out_channels=None,
+        act=None,
+        in_channels=None,
+        name=None  # 'subpixel_conv2d'
+    ):
+        super().__init__(name, act=act)
+        self.scale = scale
+        self.n_out_channels = n_out_channels
+        self.in_channels = in_channels
+
+        if self.in_channels is not None:
+            self.build(None)
+            self._built = True
+        logging.info(
+            "SubpixelConv2d %s: scale: %d act: %s" %
+            (self.name, scale, self.act.__class__.__name__ if self.act is not None else 'No Activation')
+        )
+
+    def __repr__(self):
+        actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
+        s = ('{classname}(in_channels={in_channels}, out_channels={n_out_channels}')
+        s += (', ' + actstr)
+        if self.name is not None:
+            s += ', name=\'{name}\''
+        s += ')'
+        return s.format(classname=self.__class__.__name__, **self.__dict__)
+
+    def build(self, inputs_shape):
+
+        if inputs_shape is not None:
+            self.in_channels = inputs_shape[-1]
+
+        if self.in_channels / (self.scale**2) % 1 != 0:
+            raise Exception(
+                "SubpixelConv2d: The number of input channels == (scale x scale) x The number of output channels"
+            )
+        if self.n_out_channels is None:
+            self.n_out_channels = int(self.in_channels / (self.scale**2))
+        self.depth_to_space = tl.ops.DepthToSpace(block_size=self.scale)
+
+    def forward(self, inputs):
+        if self._forward_state == False:
+            if self._built == False:
+                self.build(tl.get_tensor_shape(inputs))
+                self._built = True
+            self._forward_state = True
+
+        outputs = self._PS(X=inputs, r=self.scale, n_out_channels=self.n_out_channels)
+        if self.act is not None:
+            outputs = self.act(outputs)
+        return outputs
+
+    def _PS(self, X, r, n_out_channels):
+
+        _err_log = "SubpixelConv2d: The number of input channels == (scale x scale) x The number of output channels"
+
+        if n_out_channels >= 1:
+            if int(X.get_shape()[-1]) != (r**2) * n_out_channels:
+                raise Exception(_err_log)
+
+            X = self.depth_to_space(input=X)
+        else:
+            raise RuntimeError(_err_log)
+
+        return X
diff --git a/tensorlayer/layers/convolution/ternary_conv.py b/tensorlayer/layers/convolution/ternary_conv.py
new file mode 100644
index 000000000..5b60ae052
--- /dev/null
+++ b/tensorlayer/layers/convolution/ternary_conv.py
@@ -0,0 +1,166 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import tensorlayer as tl
+from tensorlayer import logging
+from tensorlayer.layers.core import Module
+from tensorlayer.layers.utils import compute_alpha, ternary_operation
+
+__all__ = ['TernaryConv2d']
+
+
+class TernaryConv2d(Module):
+    """
+    The :class:`TernaryConv2d` class is a 2D ternary CNN layer, whose weights are either -1, 0 or 1 during inference.
+
+    Note that the bias vector is not ternarized.
+
+    Parameters
+    ----------
+    n_filter : int
+        The number of filters.
+    filter_size : tuple of int
+        The filter size (height, width).
+    strides : tuple of int
+        The sliding window strides of corresponding input dimensions.
+        It must be in the same order as the ``shape`` parameter.
+    act : activation function
+        The activation function of this layer.
+    padding : str
+        The padding algorithm type: "SAME" or "VALID".
+    use_gemm : boolean
+        If True, use gemm instead of ``tf.matmul`` for inference.
+        TODO: support gemm
+    data_format : str
+        "channels_last" (NHWC, default) or "channels_first" (NCHW).
+    dilation_rate : tuple of int
+        Specifying the dilation rate to use for dilated convolution.
+    W_init : initializer
+        The initializer for the weight matrix.
+    b_init : initializer or None
+        The initializer for the bias vector. If None, skip biases.
+    in_channels : int
+        The number of in channels.
+    name : None or str
+        A unique layer name.
+
+    Examples
+    ---------
+    With TensorLayer
+
+    >>> net = tl.layers.Input([8, 12, 12, 32], name='input')
+    >>> ternaryconv2d = tl.layers.TernaryConv2d(
+    ...     n_filter=64, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='ternaryconv2d'
+    ... )(net)
+    >>> print(ternaryconv2d)
+    >>> output shape : (8, 12, 12, 64)
+
+    """
+
+    def __init__(
+        self,
+        n_filter=32,
+        filter_size=(3, 3),
+        strides=(1, 1),
+        act=None,
+        padding='SAME',
+        use_gemm=False,
+        data_format="channels_last",
+        dilation_rate=(1, 1),
+        W_init=tl.initializers.truncated_normal(stddev=0.02),
+        b_init=tl.initializers.constant(value=0.0),
+        in_channels=None,
+        name=None  # 'ternary_cnn2d',
+    ):
+        super().__init__(name, act=act)
+        self.n_filter = n_filter
+        self.filter_size = filter_size
+        self.strides = self._strides = strides
+        self.padding = padding
+        self.use_gemm = use_gemm
+        self.data_format = data_format
+        self.dilation_rate = self._dilation_rate = dilation_rate
+        self.W_init = W_init
+        self.b_init = b_init
+        self.in_channels = in_channels
+
+        if self.in_channels:
+            self.build(None)
+            self._built = True
+
+        logging.info(
+            "TernaryConv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % (
+                self.name, n_filter, str(filter_size), str(strides), padding,
+                self.act.__class__.__name__ if self.act is not None else 'No Activation'
+            )
+        )
+
+        if use_gemm:
+            raise Exception("TODO. The current version uses tf.matmul for inference.")
+
+        if len(self.strides) != 2:
+            raise ValueError("len(strides) should be 2.")
+
+    def __repr__(self):
+        actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
+        s = (
+            '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}'
+            ', strides={strides}, padding={padding}'
+        )
+        if self.dilation_rate != (1, ) * len(self.dilation_rate):
+            s += ', dilation={dilation_rate}'
+        if self.b_init is None:
+            s += ', bias=False'
+        s += (', ' + actstr)
+        if self.name is not None:
+            s += ', name=\'{name}\''
+        s += ')'
+        return s.format(classname=self.__class__.__name__, **self.__dict__)
+
+    def build(self, inputs_shape):
+        if self.data_format == 'channels_last':
+            self.data_format = 'NHWC'
+            if self.in_channels is None:
+                self.in_channels = inputs_shape[-1]
+            self._strides = [1, self._strides[0], self._strides[1], 1]
+            self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], 1]
+        elif self.data_format == 'channels_first':
+            self.data_format = 'NCHW'
+            if self.in_channels is None:
+                self.in_channels = inputs_shape[1]
+            self._strides = [1, 1, self._strides[0], self._strides[1]]
+            self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1]]
+        else:
+            raise Exception("data_format should be either channels_last or channels_first")
+
+        self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, self.n_filter)
+
+        self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init)
+        if self.b_init:
+            self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init)
+            self.bias_add = tl.ops.BiasAdd(data_format=self.data_format)
+
+        self.conv2d = tl.ops.Conv2D(strides=self._strides, padding=self.padding, data_format=self.data_format,
+                                    dilations=self._dilation_rate)
+
+    def forward(self, inputs):
+        if self._forward_state == False:
+            if self._built == False:
+                self.build(tl.get_tensor_shape(inputs))
+                self._built = True
+            self._forward_state = True
+
+        # ternarize the weights to {-1, 0, 1}, then rescale them by the per-layer alpha
+        alpha = compute_alpha(self.W)
+
+        W_ = ternary_operation(self.W)
+        W_ = tl.ops.multiply(alpha, W_)
+
+        outputs = self.conv2d(inputs, W_)
+
+        if self.b_init:
+            outputs = self.bias_add(outputs, self.b)
+        if self.act:
+            outputs = self.act(outputs)
+
+        return outputs
diff --git a/tensorlayer/layers/core/__init__.py b/tensorlayer/layers/core/__init__.py
index b2898a330..a3b3f95c7 100644
--- a/tensorlayer/layers/core/__init__.py
+++ b/tensorlayer/layers/core/__init__.py
@@ -2,7 +2,14 @@
 # -*- coding: utf-8 -*-
 
 from tensorlayer.backend import BACKEND
+
 if BACKEND == 'mindspore':
     from .core_mindspore import *
-elif BACKEND in ['tensorflow', 'dragon']:
-    from .core_tensorflow_dragon import *
+elif BACKEND == 'tensorflow':
+    from .core_tensorflow import *
+elif BACKEND == 'paddle':
+    from .core_paddle import *
+elif BACKEND == 'dragon':
+    from .core_dragon import *
+else:
+    raise NotImplementedError("Unsupported backend: " + str(BACKEND))
diff --git a/tensorlayer/layers/core/core_tensorflow_dragon.py b/tensorlayer/layers/core/core_dragon.py
similarity index 97%
rename from tensorlayer/layers/core/core_tensorflow_dragon.py
rename to tensorlayer/layers/core/core_dragon.py
index 66ca67472..f07772c2c 100644
--- a/tensorlayer/layers/core/core_tensorflow_dragon.py
+++ b/tensorlayer/layers/core/core_dragon.py
@@ -1,26 +1,19 @@
 #! 
/usr/bin/python # -*- coding: utf-8 -*- -from .common import str2act, _save_weights, _load_weights -from tensorlayer.backend.ops.load_backend import BACKEND -from collections import OrderedDict +#TODO Dragon Module needs a better implementation + import time +import dragon as dg import tensorlayer as tl from tensorlayer.layers.utils import (get_variable_with_initializer) +from .common import str2act, _save_weights, _load_weights +from collections import OrderedDict from tensorlayer import logging __all__ = ['Module', 'SequentialLayer', 'LayerList'] -_global_layer_name_dict = {} # TODO: better implementation? - -if BACKEND == 'tensorflow': - import tensorflow as tf - Parameter_ = tf.Variable -elif BACKEND == 'dragon': - import dragon as dg - Parameter_ = dg.Tensor # TODO the dragon parameter is a initializers -else: - raise NotImplementedError("This backend is not supported") - +_global_layer_name_dict = {} +Parameter_ = dg.Tensor class Module(object): @@ -142,10 +135,8 @@ def __setattr__(self, name, value): object.__setattr__(self, name, value) def __call__(self, inputs, *args, **kwargs): - if BACKEND in ['tensorflow', 'dragon']: - output = self.forward(inputs, *args, **kwargs) - else: - exit("Unsupported backend") + + output = self.forward(inputs, *args, **kwargs) return output diff --git a/tensorlayer/layers/core/core_mindspore.py b/tensorlayer/layers/core/core_mindspore.py index 726780cf4..b8bfe0d50 100644 --- a/tensorlayer/layers/core/core_mindspore.py +++ b/tensorlayer/layers/core/core_mindspore.py @@ -54,7 +54,7 @@ def __init__(self, name=None, act=None, *args, **kwargs): self.act = act # Layer building state - # self._built = False + self._built = False # Layer nodes state self._nodes = [] @@ -68,6 +68,10 @@ def __init__(self, name=None, act=None, *args, **kwargs): # Layer training state self.is_train = True + + # layer forward state + self._forward_state = False + def forward(self, *inputs, **kwargs): raise Exception("The forward method must be implemented by inherited class") diff --git a/tensorlayer/layers/core/core_paddle.py b/tensorlayer/layers/core/core_paddle.py new file mode 100644 index 000000000..19b56ee60 --- /dev/null +++ b/tensorlayer/layers/core/core_paddle.py @@ -0,0 +1,206 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import copy, six +import tensorlayer as tl +from .common import str2act +from paddle.fluid import framework +from paddle.fluid.dygraph import Layer +from paddle.fluid.framework import in_dygraph_mode + + +_global_layer_name_dict = {} # TODO: better implementation? 
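+# _global_layer_name_dict maps a lowercased class-name prefix (e.g. 'conv2d')
+# to the number of layers already created with that prefix, so auto-generated
+# layer names stay unique: 'conv2d', 'conv2d_1', 'conv2d_2', ...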
+
+
+class Module(Layer):
+
+    def __init__(self, name=None, act=None, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        global _global_layer_name_dict
+        if name is None:
+            prefix = self.__class__.__name__.lower()
+
+            if _global_layer_name_dict.get(prefix) is not None:
+                _global_layer_name_dict[prefix] += 1
+                name = prefix + '_' + str(_global_layer_name_dict[prefix])
+            else:
+                _global_layer_name_dict[prefix] = 0
+                name = prefix
+            while True:
+                if _global_layer_name_dict.get(name) is None:
+                    break
+                _global_layer_name_dict[prefix] += 1
+                name = prefix + '_' + str(_global_layer_name_dict[prefix])
+        else:
+            if _global_layer_name_dict.get(name) is not None:
+                pass
+            else:
+                _global_layer_name_dict[name] = 0
+
+        self.name = name
+
+        if isinstance(act, str):
+            str_act = str2act(act)
+
+        if act:
+            if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or len(act) > 10 and act[0:10] == "leaky_relu"):
+                self.act = str_act
+            elif isinstance(act, str):
+                self.act = str_act()
+            else:
+                self.act = act()
+        else:
+            self.act = act
+
+        # Layer building state
+        # self._built = False
+
+        # Layer nodes state
+        self._nodes = []
+        self._nodes_fixed = False
+
+        # Layer weight state
+        self._all_weights = []
+        self._trainable_weights = []
+        self._nontrainable_weights = []
+
+        # Layer training state
+        self.is_train = True
+
+        # layer forward state
+        self._forward_state = False
+
+    def set_train(self):
+        """
+        Sets this Layer and all its sublayers to training mode.
+        This only affects certain modules like `Dropout` and `BatchNorm`.
+
+        Returns:
+            None
+
+        Example::
+            .. code-block:: python
+
+                import paddle
+
+                class MyLayer(paddle.nn.Layer):
+                    def __init__(self):
+                        super(MyLayer, self).__init__()
+                        self._linear = paddle.nn.Linear(1, 1)
+                        self._dropout = paddle.nn.Dropout(p=0.5)
+
+                    def forward(self, input):
+                        temp = self._linear(input)
+                        temp = self._dropout(temp)
+                        return temp
+
+                x = paddle.randn([10, 1], 'float32')
+                mylayer = MyLayer()
+                mylayer.eval()  # set mylayer._dropout to eval mode
+                out = mylayer(x)
+                mylayer.train()  # set mylayer._dropout to train mode
+                out = mylayer(x)
+
+        """
+        # global setting in dygraph
+        # NOTE(chenweihang): nn.Layer also can be used in static mode,
+        # but _dygraph_tracer() can not be called in static mode
+        if in_dygraph_mode():
+            framework._dygraph_tracer().train_mode()
+        # Layer-level setting
+        self.training = True
+        for layer in self.sublayers():
+            layer.training = True
+
+    def set_eval(self):
+        """
+        Sets this Layer and all its sublayers to evaluation mode.
+        This only affects certain modules like `Dropout` and `BatchNorm`.
+
+        Returns:
+            None
+
+        Example::
+            .. code-block:: python
+
+                import paddle
+
+                class MyLayer(paddle.nn.Layer):
+                    def __init__(self):
+                        super(MyLayer, self).__init__()
+                        self._linear = paddle.nn.Linear(1, 1)
+                        self._dropout = paddle.nn.Dropout(p=0.5)
+
+                    def forward(self, input):
+                        temp = self._linear(input)
+                        temp = self._dropout(temp)
+                        return temp
+
+                x = paddle.randn([10, 1], 'float32')
+                mylayer = MyLayer()
+                mylayer.eval()  # set mylayer._dropout to eval mode
+                out = mylayer(x)
+                print(out)
+
+        """
+        # global setting in dygraph
+        # NOTE(chenweihang): nn.Layer also can be used in static mode,
+        # but _dygraph_tracer() can not be called in static mode
+        if in_dygraph_mode():
+            framework._dygraph_tracer().eval_mode()
+        # Layer-level setting
+        self.training = False
+        for layer in self.sublayers():
+            layer.training = False
+
+    def build(self, inputs_shape):
+        raise Exception("The build(self, inputs_shape) method must be implemented by inherited class")
+
+    def forward(self, *inputs, **kwargs):
+        raise Exception("The forward method must be implemented by inherited class")
+
+    def _get_weights(self, var_name, shape, init=None, trainable=True):
+        if var_name in ["filters", "weights"]:
+            w_tmp = self.create_parameter(shape=shape, attr=init, is_bias=False)
+        elif var_name in ["biases"]:
+            w_tmp = self.create_parameter(shape=shape, attr=init, is_bias=True)
+        else:
+            w_tmp = self.create_parameter(shape=shape, attr=init)
+        self.trainable = trainable
+        return w_tmp
+
+    def create_parameter(self,
+                         shape,
+                         attr=None,
+                         dtype=None,
+                         is_bias=False,
+                         default_initializer=None):
+        """Create parameters for this layer."""
+        temp_attr = copy.deepcopy(attr)
+        if isinstance(temp_attr, six.string_types) and temp_attr == "":
+            temp_attr = None
+        return self._helper.create_parameter(temp_attr, shape, dtype, is_bias,
+                                             default_initializer)
+
+    @property
+    def all_weights(self):
+        ret = [
+            param
+            for _, param in self.named_parameters(
+                include_sublayers=True)
+        ]
+        return ret
+
+    @property
+    def trainable_weights(self):
+        return self.parameters()
+
+    def init_build(self, *inputs, **kwargs):
+        """
+        (1) This method must be called when the Layer has no input in_channels.
+        (2) Automatic shape inference when the user does not enter in_channels.
+        """
+
+        self.forward(*inputs, **kwargs)
\ No newline at end of file
diff --git a/tensorlayer/layers/core/core_tensorflow.py b/tensorlayer/layers/core/core_tensorflow.py
new file mode 100644
index 000000000..0f703881b
--- /dev/null
+++ b/tensorlayer/layers/core/core_tensorflow.py
@@ -0,0 +1,765 @@
+#! 
/usr/bin/python +# -*- coding: utf-8 -*- + +from .common import str2act, _save_weights, _load_weights +from collections import OrderedDict +import time +import tensorlayer as tl +import tensorflow as tf +from tensorlayer.layers.utils import (get_variable_with_initializer) +from tensorlayer import logging + +__all__ = ['Module', 'SequentialLayer', 'LayerList'] + +_global_layer_name_dict = {} +Parameter_ = tf.Variable + + +class Module(object): + + def __init__(self, name=None, act=None, *args, **kwargs): + self._params = OrderedDict() + self._layers = OrderedDict() + self._params_status = OrderedDict() + self._parameter_layout_dict = {} + self._create_time = int(time.time() * 1e9) + + global _global_layer_name_dict + if name is None: + prefix = self.__class__.__name__.lower() + + if _global_layer_name_dict.get(prefix) is not None: + _global_layer_name_dict[prefix] += 1 + name = prefix + '_' + str(_global_layer_name_dict[prefix]) + else: + _global_layer_name_dict[prefix] = 0 + name = prefix + while True: + if _global_layer_name_dict.get(name) is None: + break + _global_layer_name_dict[prefix] += 1 + name = prefix + '_' + str(_global_layer_name_dict[prefix]) + else: + if _global_layer_name_dict.get(name) is not None: + pass + else: + _global_layer_name_dict[name] = 0 + + self.name = name + + if isinstance(act, str): + str_act = str2act(act) + + if act: + if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or len(act) > 10 and act[0:10] == "leaky_relu"): + self.act = str_act + elif isinstance(act, str): + self.act = str_act() + else: + self.act = act() + else: + self.act = act + + # Layer building state + self._built = False + + # Layer nodes state + self._nodes = [] + self._nodes_fixed = False + + # Layer weight state + self._all_weights = [] + self._trainable_weights = [] + self._nontrainable_weights = [] + + # layer forward state + self._forward_state = False + + # Layer training state + self.is_train = True + + def extend_repr(self): + """ + Sets the extended representation of the Module. + + To print customized extended information, re-implement this method in your own Layers. + """ + return '' + + def __repr__(self): + extra_str = self.extend_repr() + info_str = self.__class__.__name__ + '<' + if self._layers: + sub_str = '\n' + if extra_str: + sub_str += '{}\n'.format(self.extend_repr()) + for key, value in self._layers.items(): + sub_str += '({}): {}\n'.format(key, repr(value)) + sub_str = sub_str.replace('\n', '\n ') + '>' + info_str += sub_str + else: + info_str += extra_str + '>' + return info_str + + def __setattr__(self, name, value): + layers = self.__dict__.get('_layers') + params = self.__dict__.get('_params') + + if isinstance(value, Parameter_): + if params is None: + raise AttributeError("Can not assign params before Module.__init__() call.") + if name in self.__dict__: + if self.__dict__[name] is not None: + raise TypeError("Expected type is not in (Parameter, Module), but got Parameter.") + del self.__dict__[name] + if layers and name in layers: + raise TypeError("Expected type is Module, but got Parameter.") + self.insert_param_to_layer(name, value) + + elif isinstance(value, Module): + if layers is None: + raise AttributeError("Can not assign layers before Module.__init__() call.") + if name in self.__dict__: + del self.__dict__[name] + if params and name in params: + raise TypeError("Expected type is Parameter, but got Module.") + # TODO How to prompt the user, enter the in_channels. 
+                # TODO Automatic shape inference when the user does not enter in_channels.
+                # if value._built is False:
+                #     raise AttributeError(
+                #         "The registered layer `{}` should be built in advance. "
+                #         "Do you forget to pass the keyword argument 'in_channels'? ".format(value.name)
+                #     )
+            layers[name] = value
+        else:
+            object.__setattr__(self, name, value)
+
+    def __call__(self, inputs, *args, **kwargs):
+
+        output = self.forward(inputs, *args, **kwargs)
+
+        return output
+
+    def forward(self, *inputs, **kwargs):
+        raise Exception("The forward method must be implemented by inherited class")
+
+    def build(self, inputs_shape):
+        raise Exception("The build(self, inputs_shape) method must be implemented by inherited class")
+
+    def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), trainable=True):
+        """ Get trainable variables. """
+        weight = get_variable_with_initializer(
+            scope_name=self.name, var_name=var_name, shape=shape, init=init, trainable=trainable
+        )
+        self.trainable = trainable
+        return weight
+
+    def save_weights(self, file_path, format=None):
+        """Input file_path, save model weights into a file of given format."""
+        _save_weights(self, file_path, format)
+
+    def load_weights(self, file_path, format=None, in_order=True, skip=False):
+        """Load model weights from a given file, which should be previously saved by self.save_weights()."""
+        _load_weights(self, file_path, format, in_order, skip)
+
+    def _set_mode_for_layers(self, is_train):
+        """Set all layers of this network to a given mode.
+
+        Parameters
+        ----------
+        is_train : boolean
+            Network's mode. True means training mode while False means evaluation mode.
+
+        """
+        layers = self.layers_and_names(name_prefix='')
+        for layer_name, layer in layers:
+            if isinstance(layer, Module):
+                layer.is_train = is_train
+
+    def set_train(self):
+        """Set this network in training mode. After calling this method,
+        all layers in network are in training mode, in particular, BatchNorm, Dropout, etc.
+        TODO It is not possible to modify the parameter state after initialization, and a better way needs to be found.
+        Examples
+        --------
+        >>> import tensorlayer as tl
+        >>> net = tl.vgg16()
+        >>> net.set_train()
+
+        """
+        if self.is_train != True:
+            self.is_train = True
+            self._set_mode_for_layers(True)
+
+    def set_eval(self):
+        """Set this network in evaluation mode. After calling this method,
+        all layers in network are in evaluation mode, in particular, BatchNorm, Dropout, etc.
+        TODO It is not possible to modify the parameter state after initialization, and a better way needs to be found.
+        Examples
+        --------
+        >>> import tensorlayer as tl
+        >>> net = tl.vgg16()
+        >>> net.set_eval()
+        # do evaluation
+
+        """
+        if self.is_train != False:
+            self.is_train = False
+            self._set_mode_for_layers(False)
+
+    def test(self):
+        """Set this network in evaluation mode."""
+        self.set_eval()
+
+    def infer(self):
+        """Set this network in evaluation mode."""
+        self.set_eval()
+
+    @staticmethod
+    def _compute_shape(tensors):
+        if isinstance(tensors, list):
+            shape_mem = [tl.get_tensor_shape(t) for t in tensors]
+        else:
+            shape_mem = tl.get_tensor_shape(tensors)
+        return shape_mem
+
+    def insert_param_to_layer(self, param_name, param, check_name=True):
+        """
+        Adds a parameter to the current layer.
+
+        Inserts a parameter with given name to the layer. Please refer to the usage in
+        source code of `tensorlayer.layer.Module.__setattr__`.
+
+        Args:
+            param_name (str): Name of the parameter.
+            param (Parameter): Parameter to be inserted to the layer.
+            check_name (bool): Determines whether the name input is compatible. Default: True.
+
+        Raises:
+            KeyError: If the name of parameter is null or contains dot.
+            AttributeError: If user did not call init() first.
+            TypeError: If the type of parameter is not Parameter_.
+        """
+        if not param_name:
+            raise KeyError("The name of parameter should not be null.")
+        if check_name and '.' in param_name:
+            raise KeyError("The name of parameter should not contain \".\"")
+        if '_params' not in self.__dict__:
+            raise AttributeError("You need to call __init__() first.")
+        if hasattr(self, param_name) and param_name not in self._params:
+            raise KeyError("Duplicated parameter name '{}'.".format(param_name))
+        if not isinstance(param, Parameter_) and param is not None:
+            raise TypeError("The type of parameter should be 'Parameter' if not None.")
+        self._params[param_name] = param
+        try:
+            self._params_status[param_name] = self.trainable
+        except:
+            pass
+
+    def _add_node(self, input_tensors, output_tensors):
+        """Add a LayerNode for this layer given input_tensors, output_tensors.
+
+        WARNING: This function should not be called from outside, it should only be called
+        in layer.__call__ when building static model.
+
+        Parameters
+        ----------
+        input_tensors : Tensor or a list of tensors
+            Input tensors to this layer.
+        output_tensors : Tensor or a list of tensors
+            Output tensors to this layer.
+
+        """
+        raise NotImplementedError
+
+    @property
+    def create_time(self):
+        return self._create_time
+
+    def __getattr__(self, name):
+        if '_params' in self.__dict__:
+            params = self.__dict__['_params']
+            if name in params:
+                return params[name]
+        if '_layers' in self.__dict__:
+            layers = self.__dict__['_layers']
+            if name in layers:
+                return layers[name]
+        if '_params_status' in self.__dict__:
+            params_status = self.__dict__['_params_status']
+            if name in params_status:
+                return params_status[name]
+        raise AttributeError("'{}' object has no attribute '{}'.".format(type(self).__name__, name))
+
+    def __delattr__(self, name):
+        if name in self._params:
+            del self._params[name]
+        elif name in self._layers:
+            del self._layers[name]
+        else:
+            object.__delattr__(self, name)
+
+    @property
+    def trainable_weights(self):
+        """
+        Returns all trainable weights.
+
+        Returns a list of all trainable parameters.
+
+        Returns:
+            List, the list of trainable weights.
+        """
+        self.get_weights()
+        layers = self.layers_and_names(name_prefix='')
+        for layer_name, layer in layers:
+            params = layer._params.items()
+            params_status = layer._params_status.items()
+            params_zip = zip(params, params_status)
+            for params, params_status in params_zip:
+                if params_status[1] == True:
+                    self._trainable_weights.append(params[1])
+        return self._trainable_weights
+
+    @property
+    def nontrainable_weights(self):
+        """
+        Returns all untrainable weights.
+
+        Returns a list of all untrainable weights.
+
+        Returns:
+            List, the list of untrainable weights.
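+
+        Examples:
+            >>> # illustrative; assumes a built network such as tl.vgg16()
+            >>> net = tl.vgg16()
+            >>> for w in net.nontrainable_weights:
+            >>>     print(w.name)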
+ """ + layers = self.layers_and_names(name_prefix='') + for layer_name, layer in layers: + params = layer._params.items() + params_status = layer._params_status.items() + params_zip = zip(params, params_status) + for params, params_status in params_zip: + if params_status[1] == False: + self._nontrainable_weights.append(params[1]) + return self._nontrainable_weights + + @property + def all_weights(self): + layers = self.layers_and_names(name_prefix='') + for layer_name, layer in layers: + params = layer._params.items() + for par, val in params: + self._all_weights.append(val) + return self._all_weights + + def get_weights(self, expand=True): + """ + Returns an iterator over layer weights. + + Yields weights of this layer. If `expand` is True, yield parameters of this layer and all sublayers. + + Args: + expand (bool): If True, yields parameters of this layer and all sublayers. Otherwise, yields only parameters + that are direct members of this layer. Default: True. + + Examples: + >>> net = Net() + >>> for item in net.get_weights(): + >>> print(item) + """ + for _, param in self.parameters_and_names(expand=expand): + yield param + + def check_names(self): + names = set("") + for value, param in self.parameters_and_names(): + if param.name in names: + raise ValueError( + "The value of {} is {}, its name '{}' already exists.".format(value, param, param.name) + ) + names.add(param.name) + + def insert_child_to_layer(self, child_name, child): + """ + Adds a child layer to the current layer. + + Args: + child_name (str): Name of the child layer. + child (Module): The child layer to be inserted. + + Raises: + KeyError: Child Module's name is incorrect or duplicated with the other child name. + TypeError: Child Module's type is incorrect. + """ + if not child_name or '.' in child_name: + raise KeyError("Child layer name is incorrect.") + if hasattr(self, child_name) and child_name not in self._layers: + raise KeyError("Duplicate child name '{}'.".format(child_name)) + if not isinstance(child, Module) and child is not None: + raise TypeError("Child layer type is incorrect.") + self._layers[child_name] = child + + def parameters_and_names(self, name_prefix='', expand=True): + """ + Returns an iterator over layer parameters. + + Includes the parameter's name and itself. + + Args: + name_prefix (str): Namespace. Default: ''. + expand (bool): If True, yields parameters of this layer and all sublayers. Otherwise, yields only parameters + that are direct members of this layer. Default: True. + + Examples: + >>> n = Net() + >>> names = [] + >>> for m in n.parameters_and_names(): + >>> if m[0]: + >>> names.append(m[0]) + """ + layers = [] + if expand: + layers = self.layers_and_names(name_prefix=name_prefix) + else: + layers.append((name_prefix, self)) + + params_set = set() + for layer_name, layer in layers: + params = layer._params.items() + for par_name, par in params: + if par.inited_param is not None: + par = par.inited_param + if par is not None and id(par) not in params_set: + params_set.add(id(par)) + par_new_name = par_name + if layer_name: + par_new_name = layer_name + '.' + par_new_name + + yield par_new_name, par + + def layers_and_names(self, layers=None, name_prefix=''): + """ + Returns an iterator over all layers in the network. + + Includes the layer's name and itself. + + Args: + layers (str): layers to iterate over. Default: None. + name_prefix (str): Namespace. Default: ''. 
+    def layers_and_names(self, layers=None, name_prefix=''):
+        """
+        Returns an iterator over all layers in the network.
+
+        Yields both the layer's name and the layer itself.
+
+        Args:
+            layers (set): Set of layers that have already been visited; used internally
+                to avoid cycles. Default: None.
+            name_prefix (str): Namespace. Default: ''.
+
+        Examples:
+            >>> n = Net()
+            >>> names = []
+            >>> for m in n.layers_and_names():
+            >>>     if m[0]:
+            >>>         names.append(m[0])
+        """
+        t_layers = layers if layers else set()
+        if self in t_layers:
+            return
+
+        t_layers.add(self)
+        yield name_prefix, self
+
+        for name, layer in self._layers.items():
+            if layer:
+                layers_name_prefix = name
+                if name_prefix:
+                    layers_name_prefix = name_prefix + '.' + layers_name_prefix
+                for ele in layer.layers_and_names(t_layers, layers_name_prefix):
+                    yield ele
+
+    def layers(self):
+        """Returns an iterator over the immediate layers."""
+        return self.name_layers().values()
+
+    def name_layers(self):
+        """
+        Returns an ordered dict of the immediate layers in the network.
+
+        Includes the name of each layer and the layer itself.
+        """
+        value_set = set()
+        layers = OrderedDict()
+        for name, layer in self._layers.items():
+            if layer is not None and layer not in value_set:
+                value_set.add(layer)
+                layers[name] = layer
+        return layers
+
+    def init_build(self, *inputs, **kwargs):
+        """
+        (1) This method must be called when the layer is created without `in_channels`.
+        (2) It infers the shapes automatically when the user does not pass `in_channels`.
+        """
+        self.forward(*inputs, **kwargs)
+
+
+class SequentialLayer(Module):
+    """
+    Sequential layer container.
+
+    A list of Layers will be added to it in the order they are passed to the constructor.
+    Alternatively, an ordered dict of layers can also be passed in.
+
+    Args:
+        args (list, OrderedDict): List of subclasses of Module.
+
+    Raises:
+        TypeError: If the type of the argument is not list or OrderedDict.
+
+    Inputs:
+        - **input** (Tensor) - Tensor with shape according to the first Module in the sequence.
+
+    Outputs:
+        Tensor, the output Tensor with shape depending on the input and the defined sequence of Layers.
+
+    Examples:
+        >>> conv = tl.layers.Conv2d(3, 2, 3, pad_mode='valid')
+        >>> bn = tl.layers.BatchNorm2d(2)
+        >>> seq = tl.layers.SequentialLayer([conv, bn])
+        >>>
+        >>> x = tl.layers.Input((1, 3, 4, 4))
+        >>> seq(x)
+    """
+
+    def __init__(self, *args):
+        super(SequentialLayer, self).__init__()
+        self._built = True
+        if len(args) == 1:
+            layers = args[0]
+            if isinstance(layers, list):
+                for index, layer in enumerate(layers):
+                    self.insert_child_to_layer(str(index), layer)
+            elif isinstance(layers, OrderedDict):
+                for name, layer in layers.items():
+                    self.insert_child_to_layer(name, layer)
+            else:
+                raise TypeError('Layers must be a list or an OrderedDict')
+        else:
+            for index, layer in enumerate(args):
+                self.insert_child_to_layer(str(index), layer)
+        self.layer_list = list(self._layers.values())
+
+    def __getitem__(self, index):
+        if isinstance(index, slice):
+            return self.__class__(OrderedDict(list(self._layers.items())[index]))
+        index = self._valid_index(len(self), index)
+        return list(self._layers.values())[index]
+
+    def __setitem__(self, index, layer):
+        if self._valid_module(layer):
+            index = self._valid_index(len(self), index)
+            key = list(self._layers.keys())[index]
+            self._layers[key] = layer
+            self.layer_list = list(self._layers.values())
+
+    def __delitem__(self, index):
+        if isinstance(index, int):
+            index = self._valid_index(len(self), index)
+            key = list(self._layers.keys())[index]
+            del self._layers[key]
+        elif isinstance(index, slice):
+            keys = list(self._layers.keys())[index]
+            for key in keys:
+                del self._layers[key]
+        else:
+            raise TypeError('Index {} is not an int or a slice'.format(index))
+        self.layer_list = list(self._layers.values())
+
+    def __len__(self):
+        return len(self._layers)
+
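+    # Sketch of list-style mutation (the layer variables are illustrative):
+    #     seq = SequentialLayer([layer_a, layer_b])
+    #     seq[0] = layer_c    # __setitem__
+    #     del seq[-1]         # __delitem__
+    #     seq.append(layer_d) # append() below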
+    def append(self, layer):
+        if self._valid_module(layer):
+            self._layers[str(len(self))] = layer
+            self.layer_list = list(self._layers.values())
+        return self
+
+    def build(self, inputs_shape):
+        pass
+
+    def forward(self, input_data):
+        for layer in self.layer_list:
+            input_data = layer(input_data)
+        return input_data
+
+    def _valid_index(self, layer_num, index):
+        if not isinstance(index, int):
+            raise TypeError("Index {} is not int type".format(index))
+        if not -layer_num <= index < layer_num:
+            raise IndexError(
+                "Index should be a number in range [{}, {}), but got {}".format(-layer_num, layer_num, index)
+            )
+        return index % layer_num
+
+    def _valid_module(self, layer):
+        if issubclass(layer.__class__, Module):
+            return True
+        raise TypeError('Layer {} is not a subclass of Module'.format(layer))
+
+
+class LayerList(Module):
+    """
+    The class :class:`LayerList` is a linear stack of layers.
+
+    The :class:`LayerList` can be created by passing a list of layer instances.
+    The given layer instances will be automatically connected one by one.
+
+    Parameters
+    ----------
+    layers : list of Layer
+        A list of layers.
+    name : str or None
+        A unique layer name. If None, a unique name will be automatically assigned.
+
+    Methods
+    ---------
+    __init__()
+        Initializing the LayerList.
+    weights()
+        A collection of weights of all the layer instances.
+    build()
+        Build the LayerList. The layer instances will be connected automatically one by one.
+    forward()
+        Forward the computation. The computation will go through all layer instances.
+    """
+
+    def __init__(self, layers, name=None):
+        """
+        Initializing the LayerList given a list of Layer.
+
+        :param layers: list of Layer
+        :param name: str or None
+        """
+        super(LayerList, self).__init__(name=name)
+        self.layers = layers
+        is_built = True
+        for layer in self.layers:
+            self._trainable_weights.extend(layer.trainable_weights)
+            self._nontrainable_weights.extend(layer.nontrainable_weights)
+            if layer._built is False:
+                is_built = False
+            # if layer._built and layer.all_weights is not None:
+            #     # some layers in the list passed in have already been built
+            #     # e.g. using input shape to construct layers in dynamic eager mode
+            #     if self._all_weights is None:
+            #         self._all_weights = list()
+            #     self._all_weights.extend(layer.all_weights)
+        if is_built:
+            self._built = True
+
+        logging.info(
+            "LayerList %s including layers [%s]" % (self.name, ', '.join([layer.name for layer in self.layers]))
+        )
+
+        # check layer name uniqueness in LayerList
+        local_layer_name_set = set()
+        for layer in self.layers:
+            if layer.name not in local_layer_name_set:
+                local_layer_name_set.add(layer.name)
+            else:
+                raise ValueError(
+                    'Layer name \'%s\' has already been used by another layer. Please change the layer name.' %
+                    layer.name
+                )
+
+    def __getitem__(self, idx):
+        if isinstance(idx, slice):
+            return LayerList(list(self.layers)[idx])
+        else:
+            return self.layers[idx]
+
+    def __len__(self):
+        return len(self.layers)
+
+    def __repr__(self):
+        tmpstr = 'LayerList' + '(\n'
+        for idx, layer in enumerate(self.layers):
+            modstr = layer.__repr__()
+            modstr = _addindent(modstr, 2)
+            tmpstr = tmpstr + ' (' + str(idx) + '): ' + modstr + '\n'
+
+        tmpstr = tmpstr + ')'
+        return tmpstr
+
+    @property
+    def trainable_weights(self):
+        return self._trainable_weights
+
+    @property
+    def nontrainable_weights(self):
+        return self._nontrainable_weights
+
+    @property
+    def all_weights(self):
+        return self._trainable_weights + self._nontrainable_weights
+
+    # def build(self, inputs_shape):
+    #     """
+    #     Build the LayerList.
The layer instances will be connected automatically one by one. + # """ + # in_tensor = self._input_tensors + # # in_layer = self._input_layer + # for layer in self.layers: + # is_build = layer._built + # out_tensor = layer(in_tensor) + # # nlayer = layer(in_layer) + # if is_build is False and layer.all_weights is not None: + # if self._all_weights is None: + # self._all_weights = list() + # self._all_weights.extend(layer.all_weights) + # layer._built = True + # in_tensor = out_tensor + # # in_layer = nlayer + + def forward(self, inputs): + """ + Forward the computation. The computation will go through all layer instances. + """ + z = inputs + for layer in self.layers: + z = layer.forward(z) + return z + + def _set_mode_for_layers(self, is_train): + """Set training/evaluation mode for all layer instances.""" + self.is_train = is_train + for layer in self.layers: + if isinstance(layer, LayerList): + layer._set_mode_for_layers(is_train) + else: + layer.is_train = is_train + + def get_args(self): + init_args = {} + layers = self.layer_args["layers"] + init_args["layers"] = [layer.config for layer in layers] + init_args.update({"layer_type": "layerlist"}) + return init_args + +def tolist(tensors): + if isinstance(tensors, list) or isinstance(tensors, tuple): + ntensors = list() + for t in tensors: + ntensors += tolist(t) + return ntensors + else: + return [tensors] + +def _addindent(s_, numSpaces): + s = s_.split('\n') + # don't do anything for single-line stuff + if len(s) == 1: + return s_ + first = s.pop(0) + s = [(numSpaces * ' ') + line for line in s] + s = '\n'.join(s) + s = first + '\n' + s + return s \ No newline at end of file diff --git a/tensorlayer/layers/inputs.py b/tensorlayer/layers/inputs.py index 44202584e..34a877869 100644 --- a/tensorlayer/layers/inputs.py +++ b/tensorlayer/layers/inputs.py @@ -23,24 +23,18 @@ class _InputLayer(Module): """ - def __init__(self, shape, dtype=tl.float32, name=None): + def __init__(self, shape, dtype=tl.float32, name=None, init=None): super(_InputLayer, self).__init__(name) - # if isinstance(dtype, str): - # try: - # dtype = eval(dtype) - # except Exception as e: - # raise RuntimeError("%s is not a valid dtype for InputLayer." % (dtype)) - # if not isinstance(dtype, tl.DType): - # raise RuntimeError("%s is not a valid dtype for InputLayer." 
% (dtype)) - logging.info("Input %s: %s" % (self.name, str(shape))) - self.shape = shape # shape is needed in __repr__ + self.shape = shape self.dtype = dtype self.shape_without_none = [_ if _ is not None else 1 for _ in shape] - self.outputs = tl.initializers.ones()(self.shape_without_none, dtype=self.dtype) + if init is None: + self.outputs = tl.initializers.ones()(self.shape_without_none, dtype=self.dtype) + else: + self.outputs = init(self.shape_without_none, dtype=self.dtype) self._built = True - # self._add_node(outputs, outputs) def __repr__(self): s = 'Input(shape=%s' % str(self.shape) @@ -50,23 +44,12 @@ def __repr__(self): return s def __call__(self, *args, **kwargs): - # return super(_InputLayer, self).__call__(inputs) return self.outputs def build(self, inputs_shape): pass def forward(self): - # tl.initializers.random_uniform() - # tl.initializers.random_normal() - # tl.initializers.truncated_normal() - # tl.initializers.constant(2.0) - # tl.initializers.He_Normal() - # tl.initializers.He_Normal() - # tl.initializers.zeros() - # tl.initializers.ones() - - # outputs = self.inputs(self.shape_without_none, dtype=self.dtype) return self.outputs @@ -82,7 +65,6 @@ def Input(shape, init=tl.initializers.ones(), dtype=tl.float32, name=None): A unique layer name. """ - input_layer = _InputLayer(shape, dtype=dtype, name=name) - outputs = input_layer(init) - # outputs = input_layer._nodes[0].out_tensors[0] + input_layer = _InputLayer(shape, dtype=dtype, name=name, init=init) + outputs = input_layer() return outputs diff --git a/tensorlayer/layers/recurrent.py b/tensorlayer/layers/recurrent.py index edeec330c..23d611a40 100644 --- a/tensorlayer/layers/recurrent.py +++ b/tensorlayer/layers/recurrent.py @@ -6,1259 +6,3 @@ import tensorlayer as tl from tensorlayer import logging from tensorlayer.layers.core import Module - -# TODO: uncomment -__all__ = [ - 'RNN', - 'SimpleRNN', - 'GRURNN', - 'LSTMRNN', - 'BiRNN', - # 'ConvRNNCell', - # 'BasicConvLSTMCell', - # 'ConvLSTM', - 'retrieve_seq_length_op', - 'retrieve_seq_length_op2', - 'retrieve_seq_length_op3', - 'target_mask_op', -] - - -class RNN(Module): - """ - The :class:`RNN` class is a fixed length recurrent layer for implementing simple RNN, - LSTM, GRU and etc. - - Parameters - ---------- - cell : TensorFlow cell function - A RNN cell implemented by tf.keras - - E.g. tf.keras.layers.SimpleRNNCell, tf.keras.layers.LSTMCell, tf.keras.layers.GRUCell - - Note TF2.0+, TF1.0+ and TF1.0- are different - - return_last_output : boolean - Whether return last output or all outputs in a sequence. - - - If True, return the last output, "Sequence input and single output" - - If False, return all outputs, "Synced sequence input and output" - - In other word, if you want to stack more RNNs on this layer, set to False - - In a dynamic model, `return_last_output` can be updated when it is called in customised forward(). - By default, `False`. - return_seq_2d : boolean - Only consider this argument when `return_last_output` is `False` - - - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. - - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. - - In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). - By default, `False`. - return_last_state: boolean - Whether to return the last state of the RNN cell. The state is a list of Tensor. 
- For simple RNN and GRU, last_state = [last_output]; For LSTM, last_state = [last_output, last_cell_state] - - - If True, the layer will return outputs and the final state of the cell. - - If False, the layer will return outputs only. - - In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). - By default, `False`. - in_channels: int - Optional, the number of channels of the previous layer which is normally the size of embedding. - If given, the layer will be built when init. - If None, it will be automatically detected when the layer is forwarded for the first time. - name : str - A unique layer name. - - Examples - -------- - For synced sequence input and output, see `PTB example `__ - - A simple regression model below. - - >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) - >>> rnn_out, lstm_state = tl.layers.RNN( - >>> cell=tf.keras.layers.LSTMCell(units=hidden_size, dropout=0.1), - >>> in_channels=embedding_size, - >>> return_last_output=True, return_last_state=True, name='lstmrnn' - >>> )(inputs) - >>> outputs = tl.layers.Dense(n_units=1)(rnn_out) - >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_state[0], rnn_state[1]], name='rnn_model') - >>> # If LSTMCell is applied, the rnn_state is [h, c] where h the hidden state and c the cell state of LSTM. - - A stacked RNN model. - - >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) - >>> rnn_out1 = tl.layers.RNN( - >>> cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), - >>> return_last_output=False, return_seq_2d=False, return_last_state=False - >>> )(inputs) - >>> rnn_out2 = tl.layers.RNN( - >>> cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), - >>> return_last_output=True, return_last_state=False - >>> )(rnn_out1) - >>> outputs = tl.layers.Dense(n_units=1)(rnn_out2) - >>> rnn_model = tl.models.Model(inputs=inputs, outputs=outputs) - - An example if the sequences have different length and contain padding. - Similar to the DynamicRNN in TL 1.x. - - If the `sequence_length` is provided in RNN's forwarding and both `return_last_output` and `return_last_state` - are set as `True`, the forward function will automatically ignore the paddings. Note that if `return_last_output` - is set as `False`, the synced sequence outputs will still include outputs which correspond with paddings, - but users are free to select which slice of outputs to be used in following procedure. - - The `sequence_length` should be a list of integers which indicates the length of each sequence. - It is recommended to - `tl.layers.retrieve_seq_length_op3 `__ - to calculate the `sequence_length`. - - >>> data = [[[1], [2], [0], [0], [0]], [[1], [2], [3], [0], [0]], [[1], [2], [6], [1], [1]]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> class DynamicRNNExample(tl.models.Model): - >>> def __init__(self): - >>> super(DynamicRNNExample, self).__init__() - >>> self.rnnlayer = tl.layers.RNN( - >>> cell=tf.keras.layers.SimpleRNNCell(units=6, dropout=0.1), in_channels=1, return_last_output=True, - >>> return_last_state=True - >>> ) - >>> def forward(self, x): - >>> z, s = self.rnnlayer(x, sequence_length=tl.layers.retrieve_seq_length_op3(x)) - >>> return z, s - >>> model = DynamicRNNExample() - >>> model.eval() - >>> output, state = model(data) - - - Notes - ----- - Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see layer :class:`Reshape`. 
- - """ - - def __init__( - self, - cell, - return_last_output=False, - return_seq_2d=False, - return_last_state=True, - in_channels=None, - name=None, # 'rnn' - ): - - super(RNN, self).__init__(name=name) - - self.cell = cell - self.return_last_output = return_last_output - self.return_seq_2d = return_seq_2d - self.return_last_state = return_last_state - - if in_channels is not None: - self.build((None, None, in_channels)) - self._built = True - - logging.info("RNN %s: cell: %s, n_units: %s" % (self.name, self.cell.__class__.__name__, self.cell.units)) - - def __repr__(self): - s = ('{classname}(cell={cellname}, n_units={n_units}') - s += ', name=\'{name}\'' - s += ')' - return s.format( - classname=self.__class__.__name__, cellname=self.cell.__class__.__name__, n_units=self.cell.units, - **self.__dict__ - ) - - def build(self, inputs_shape): - """ - Parameters - ---------- - inputs_shape : tuple - the shape of inputs tensor - """ - # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] - if len(inputs_shape) != 3: - raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]") - - with tf.name_scope(self.name) as scope: - self.cell.build(tuple(inputs_shape)) - - if self._trainable_weights is None: - self._trainable_weights = list() - for var in self.cell.trainable_variables: - self._trainable_weights.append(var) - - # @tf.function - def forward(self, inputs, sequence_length=None, initial_state=None, **kwargs): - """ - Parameters - ---------- - inputs : input tensor - The input of a network - sequence_length: None or list of integers - The actual length of each sequence in batch without padding. - If provided, when `return_last_output` and `return_last_state` are `True`, - the RNN will perform in the manner of a dynamic RNN, i.e. - the RNN will return the actual last output / state without padding. - initial_state : None or list of Tensor (RNN State) - If None, `initial_state` is zero state. - - **kwargs: dict - Some attributes can be updated during forwarding - such as `return_last_output`, `return_seq_2d`, `return_last_state`. - """ - if kwargs: - for attr in kwargs: - if attr in self.__dict__: - setattr(self, attr, kwargs[attr]) - - batch_size = inputs.get_shape().as_list()[0] - total_steps = inputs.get_shape().as_list()[1] - - # checking the type and values of sequence_length - if sequence_length is not None: - if isinstance(sequence_length, list): - pass - elif isinstance(sequence_length, tf.Tensor): - pass - elif isinstance(sequence_length, np.ndarray): - sequence_length = sequence_length.tolist() - else: - raise TypeError( - "The argument sequence_length should be either None or a list of integers. " - "Type got %s" % type(sequence_length) - ) - if (len(sequence_length) != batch_size): - raise ValueError( - "The argument sequence_length should contain %d " % batch_size + - "elements indicating the initial length of each sequence, but got only %d. " % len(sequence_length) - ) - for i in sequence_length: - if not (type(i) is int or (isinstance(i, tf.Tensor) and i.dtype.is_integer)): - raise TypeError( - "The argument sequence_length should be either None or a list of integers. " - "One element of sequence_length has the type %s" % type(i) - ) - if i > total_steps: - raise ValueError( - "The actual length of a sequence should not be longer than " - "that of the longest sequence (total steps) in this mini-batch. 
" - "Total steps of this mini-batch %d, " % total_steps + - "but got an actual length of a sequence %d" % i - ) - - sequence_length = tl.layers.retrieve_seq_length_op3(inputs) - - sequence_length = [i - 1 if i >= 1 else 0 for i in sequence_length] - - # set warning - # if (not self.return_last_output) and sequence_length is not None: - # warnings.warn( - # 'return_last_output is set as %s ' % self.return_last_output + - # 'When sequence_length is provided, it is recommended to set as True. ' + - # 'Otherwise, padding will be considered while RNN is forwarding.' - # ) - - # return the last output, iterating each seq including padding ones. No need to store output during each - # time step. - if self.return_last_output and sequence_length is None: - outputs = [-1] - else: - outputs = list() - - # initialize the states if provided - states = initial_state if initial_state is not None else self.cell.get_initial_state(inputs) - if not isinstance(states, list): - states = [states] - - stored_states = list() - - # initialize the cell - self.cell.reset_dropout_mask() - self.cell.reset_recurrent_dropout_mask() - - # recurrent computation - # FIXME: if sequence_length is provided (dynamic rnn), only iterate max(sequence_length) times. - for time_step in range(total_steps): - - cell_output, states = self.cell.call(inputs[:, time_step, :], states, training=self.is_train) - stored_states.append(states) - - if self.return_last_output and sequence_length is None: - outputs[-1] = cell_output - else: - outputs.append(cell_output) - - # prepare to return results - if self.return_last_output and sequence_length is None: - outputs = outputs[-1] - - elif self.return_last_output and sequence_length is not None: - outputs = tf.convert_to_tensor(outputs) - outputs = tf.gather(outputs, sequence_length, axis=0) - - outputs_without_padding = [] - for i in range(batch_size): - outputs_without_padding.append(outputs[i][i][:]) - outputs = tf.convert_to_tensor(outputs_without_padding) - else: - if self.return_seq_2d: - # PTB tutorial: stack dense layer after that, or compute the cost from the output - # 2D Tensor [batch_size * n_steps, n_hidden] - outputs = tf.reshape(tf.concat(outputs, 1), [-1, self.cell.units]) - else: - # : stack more RNN layer after that - # 3D Tensor [batch_size, n_steps, n_hidden] - outputs = tf.reshape(tf.concat(outputs, 1), [-1, total_steps, self.cell.units]) - - if self.return_last_state and sequence_length is None: - return outputs, states - elif self.return_last_state and sequence_length is not None: - - stored_states = tf.convert_to_tensor(stored_states) - stored_states = tf.gather(stored_states, sequence_length, axis=0) - - states = [] - for i in range(stored_states.shape[1]): - states.append(tf.convert_to_tensor([stored_states[b, i, b, :] for b in range(batch_size)])) - - return outputs, states - else: - return outputs - - -class SimpleRNN(RNN): - """ - The :class:`SimpleRNN` class is a fixed length recurrent layer for implementing simple RNN. - - Parameters - ---------- - units: int - Positive integer, the dimension of hidden space. - return_last_output : boolean - Whether return last output or all outputs in a sequence. - - If True, return the last output, "Sequence input and single output" - - If False, return all outputs, "Synced sequence input and output" - - In other word, if you want to stack more RNNs on this layer, set to False - - In a dynamic model, `return_last_output` can be updated when it is called in customised forward(). - By default, `False`. 
- return_seq_2d : boolean - Only consider this argument when `return_last_output` is `False` - - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. - - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. - - In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). - By default, `False`. - return_last_state: boolean - Whether to return the last state of the RNN cell. The state is a list of Tensor. - For simple RNN, last_state = [last_output] - - - If True, the layer will return outputs and the final state of the cell. - - If False, the layer will return outputs only. - - In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). - By default, `False`. - in_channels: int - Optional, the number of channels of the previous layer which is normally the size of embedding. - If given, the layer will be built when init. - If None, it will be automatically detected when the layer is forwarded for the first time. - name : str - A unique layer name. - `**kwargs`: - Advanced arguments to configure the simple RNN cell. - Please check tf.keras.layers.SimpleRNNCell. - - Examples - -------- - - A simple regression model below. - - >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) - >>> rnn_out, lstm_state = tl.layers.SimpleRNN( - >>> units=hidden_size, dropout=0.1, # both units and dropout are used to configure the simple rnn cell. - >>> in_channels=embedding_size, - >>> return_last_output=True, return_last_state=True, name='simplernn' - >>> )(inputs) - >>> outputs = tl.layers.Dense(n_units=1)(rnn_out) - >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_state[0]], name='rnn_model') - - Notes - ----- - Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see layer :class:`Reshape`. - - """ - - def __init__( - self, - units, - return_last_output=False, - return_seq_2d=False, - return_last_state=True, - in_channels=None, - name=None, # 'simplernn' - **kwargs - ): - super(SimpleRNN, self).__init__( - cell=tf.keras.layers.SimpleRNNCell(units=units, **kwargs), return_last_output=return_last_output, - return_seq_2d=return_seq_2d, return_last_state=return_last_state, in_channels=in_channels, name=name - ) - - -class GRURNN(RNN): - """ - The :class:`GRURNN` class is a fixed length recurrent layer for implementing RNN with GRU cell. - - Parameters - ---------- - units: int - Positive integer, the dimension of hidden space. - return_last_output : boolean - Whether return last output or all outputs in a sequence. - - If True, return the last output, "Sequence input and single output" - - If False, return all outputs, "Synced sequence input and output" - - In other word, if you want to stack more RNNs on this layer, set to False - - In a dynamic model, `return_last_output` can be updated when it is called in customised forward(). - By default, `False`. - return_seq_2d : boolean - Only consider this argument when `return_last_output` is `False` - - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. - - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. - - In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). - By default, `False`. - return_last_state: boolean - Whether to return the last state of the RNN cell. The state is a list of Tensor. 
- For GRU, last_state = [last_output] - - - If True, the layer will return outputs and the final state of the cell. - - If False, the layer will return outputs only. - - In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). - By default, `False`. - in_channels: int - Optional, the number of channels of the previous layer which is normally the size of embedding. - If given, the layer will be built when init. - If None, it will be automatically detected when the layer is forwarded for the first time. - name : str - A unique layer name. - `**kwargs`: - Advanced arguments to configure the GRU cell. - Please check tf.keras.layers.GRUCell. - - Examples - -------- - - A simple regression model below. - - >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) - >>> rnn_out, lstm_state = tl.layers.GRURNN( - >>> units=hidden_size, dropout=0.1, # both units and dropout are used to configure the GRU cell. - >>> in_channels=embedding_size, - >>> return_last_output=True, return_last_state=True, name='grurnn' - >>> )(inputs) - >>> outputs = tl.layers.Dense(n_units=1)(rnn_out) - >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_state[0]], name='rnn_model') - - Notes - ----- - Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see layer :class:`Reshape`. - - """ - - def __init__( - self, - units, - return_last_output=False, - return_seq_2d=False, - return_last_state=True, - in_channels=None, - name=None, # 'grurnn' - **kwargs - ): - super(GRURNN, self).__init__( - cell=tf.keras.layers.GRUCell(units=units, **kwargs), return_last_output=return_last_output, - return_seq_2d=return_seq_2d, return_last_state=return_last_state, in_channels=in_channels, name=name - ) - - -class LSTMRNN(RNN): - """ - The :class:`LSTMRNN` class is a fixed length recurrent layer for implementing RNN with LSTM cell. - - Parameters - ---------- - units: int - Positive integer, the dimension of hidden space. - return_last_output : boolean - Whether return last output or all outputs in a sequence. - - If True, return the last output, "Sequence input and single output" - - If False, return all outputs, "Synced sequence input and output" - - In other word, if you want to stack more RNNs on this layer, set to False - - In a dynamic model, `return_last_output` can be updated when it is called in customised forward(). - By default, `False`. - return_seq_2d : boolean - Only consider this argument when `return_last_output` is `False` - - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. - - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. - - In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). - By default, `False`. - return_last_state: boolean - Whether to return the last state of the RNN cell. The state is a list of Tensor. - For LSTM, last_state = [last_output, last_cell_state] - - - If True, the layer will return outputs and the final state of the cell. - - If False, the layer will return outputs only. - - In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). - By default, `False`. - in_channels: int - Optional, the number of channels of the previous layer which is normally the size of embedding. - If given, the layer will be built when init. - If None, it will be automatically detected when the layer is forwarded for the first time. 
- name : str - A unique layer name. - `**kwargs`: - Advanced arguments to configure the LSTM cell. - Please check tf.keras.layers.LSTMCell. - - Examples - -------- - - A simple regression model below. - - >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) - >>> rnn_out, lstm_state = tl.layers.LSTMRNN( - >>> units=hidden_size, dropout=0.1, # both units and dropout are used to configure the LSTM cell. - >>> in_channels=embedding_size, - >>> return_last_output=True, return_last_state=True, name='grurnn' - >>> )(inputs) - >>> outputs = tl.layers.Dense(n_units=1)(rnn_out) - >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_state[0]], name='rnn_model') - - Notes - ----- - Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see layer :class:`Reshape`. - - """ - - def __init__( - self, - units, - return_last_output=False, - return_seq_2d=False, - return_last_state=True, - in_channels=None, - name=None, # 'lstmrnn' - **kwargs - ): - super(LSTMRNN, self).__init__( - cell=tf.keras.layers.LSTMCell(units=units, **kwargs), return_last_output=return_last_output, - return_seq_2d=return_seq_2d, return_last_state=return_last_state, in_channels=in_channels, name=name - ) - - -class BiRNN(Layer): - """ - The :class:`BiRNN` class is a fixed length Bidirectional recurrent layer. - - Parameters - ---------- - fw_cell : TensorFlow cell function for forward direction - A RNN cell implemented by tf.keras, e.g. tf.keras.layers.SimpleRNNCell, tf.keras.layers.LSTMCell, tf.keras.layers.GRUCell. - Note TF2.0+, TF1.0+ and TF1.0- are different - bw_cell: TensorFlow cell function for backward direction similar with `fw_cell` - return_seq_2d : boolean. - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. - In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). - By default, `False`. - return_last_state: boolean - Whether to return the last state of the two cells. The state is a list of Tensor. - - If True, the layer will return outputs, the final state of `fw_cell` and the final state of `bw_cell`. - - If False, the layer will return outputs only. - - In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). - By default, `False`. - in_channels: int - Optional, the number of channels of the previous layer which is normally the size of embedding. - If given, the layer will be built when init. - If None, it will be automatically detected when the layer is forwarded for the first time. - name : str - A unique layer name. - - Examples - -------- - A simple regression model below. - - >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) - >>> # the fw_cell and bw_cell can be different - >>> rnnlayer = tl.layers.BiRNN( - >>> fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), - >>> bw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size + 1, dropout=0.1), - >>> return_seq_2d=True, return_last_state=True - >>> ) - >>> # if return_last_state=True, the final state of the two cells will be returned together with the outputs - >>> # if return_last_state=False, only the outputs will be returned - >>> rnn_out, rnn_fw_state, rnn_bw_state = rnnlayer(inputs) - >>> # if the BiRNN is followed by a Dense, return_seq_2d should be True. - >>> # if the BiRNN is followed by other RNN, return_seq_2d can be False. 
- >>> dense = tl.layers.Dense(n_units=1)(rnn_out) - >>> outputs = tl.layers.Reshape([-1, num_steps])(dense) - >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_out, rnn_fw_state[0], rnn_bw_state[0]]) - - A stacked BiRNN model. - - >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) - >>> rnn_out1 = tl.layers.BiRNN( - >>> fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), - >>> bw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size + 1, dropout=0.1), - >>> return_seq_2d=False, return_last_state=False - >>> )(inputs) - >>> rnn_out2 = tl.layers.BiRNN( - >>> fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), - >>> bw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size + 1, dropout=0.1), - >>> return_seq_2d=True, return_last_state=False - >>> )(rnn_out1) - >>> dense = tl.layers.Dense(n_units=1)(rnn_out2) - >>> outputs = tl.layers.Reshape([-1, num_steps])(dense) - >>> rnn_model = tl.models.Model(inputs=inputs, outputs=outputs) - - Notes - ----- - Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see layer :class:`Reshape`. - - """ - - def __init__( - self, - fw_cell, - bw_cell, - return_seq_2d=False, - return_last_state=False, - in_channels=None, - name=None, # 'birnn' - ): - super(BiRNN, self).__init__(name) - - self.fw_cell = fw_cell - self.bw_cell = bw_cell - self.return_seq_2d = return_seq_2d - self.return_last_state = return_last_state - - if in_channels is not None: - self.build((None, None, in_channels)) - self._built = True - - logging.info( - "BiRNN %s: fw_cell: %s, fw_n_units: %s, bw_cell: %s, bw_n_units: %s" % ( - self.name, self.fw_cell.__class__.__name__, self.fw_cell.units, self.bw_cell.__class__.__name__, - self.bw_cell.units - ) - ) - - def __repr__(self): - s = ( - '{classname}(fw_cell={fw_cellname}, fw_n_units={fw_n_units}' - ', bw_cell={bw_cellname}, bw_n_units={bw_n_units}' - ) - s += ', name=\'{name}\'' - s += ')' - return s.format( - classname=self.__class__.__name__, fw_cellname=self.fw_cell.__class__.__name__, - fw_n_units=self.fw_cell.units, bw_cellname=self.bw_cell.__class__.__name__, bw_n_units=self.bw_cell.units, - **self.__dict__ - ) - - def build(self, inputs_shape): - """ - Parameters - ---------- - inputs_shape : tuple - the shape of inputs tensor - """ - # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] - if len(inputs_shape) != 3: - raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]") - - with tf.name_scope(self.name) as scope: - self.fw_cell.build(tuple(inputs_shape)) - self.bw_cell.build(tuple(inputs_shape)) - - if self._trainable_weights is None: - self._trainable_weights = list() - for var in self.fw_cell.trainable_variables: - self._trainable_weights.append(var) - for var in self.bw_cell.trainable_variables: - self._trainable_weights.append(var) - - # @tf.function - def forward(self, inputs, fw_initial_state=None, bw_initial_state=None, **kwargs): - """ - Parameters - ---------- - inputs : input tensor - The input of a network - fw_initial_state : None or list of Tensor (RNN State) - If None, `fw_initial_state` is zero state. - bw_initial_state : None or list of Tensor (RNN State) - If None, `bw_initial_state` is zero state. - **kwargs: dict - Some attributes can be updated during forwarding - such as `return_last_output`, `return_seq_2d`, `return_last_state`. 
- """ - - if kwargs: - for attr in kwargs: - if attr in self.__dict__: - setattr(self, attr, kwargs[attr]) - - fw_outputs = list() - bw_outputs = list() - - fw_states = fw_initial_state if fw_initial_state is not None else self.fw_cell.get_initial_state(inputs) - bw_states = bw_initial_state if bw_initial_state is not None else self.bw_cell.get_initial_state(inputs) - - if not isinstance(fw_states, list): - fw_states = [fw_states] - if not isinstance(bw_states, list): - bw_states = [bw_states] - - total_steps = inputs.get_shape().as_list()[1] - - self.fw_cell.reset_dropout_mask() - self.fw_cell.reset_recurrent_dropout_mask() - self.bw_cell.reset_dropout_mask() - self.bw_cell.reset_recurrent_dropout_mask() - - for time_step in range(total_steps): - fw_cell_output, fw_states = self.fw_cell.call(inputs[:, time_step, :], fw_states, training=self.is_train) - bw_cell_output, bw_states = self.bw_cell.call( - inputs[:, -time_step - 1, :], bw_states, training=self.is_train - ) - - fw_outputs.append(fw_cell_output) - bw_outputs.append(bw_cell_output) - - if self.return_seq_2d: - # PTB tutorial: stack dense layer after that, or compute the cost from the output - # 2D Tensor [batch_size * n_steps, n_hidden] - fw_outputs = tf.reshape(tf.concat(fw_outputs, 1), [-1, self.fw_cell.units]) - bw_outputs = tf.reshape(tf.concat(bw_outputs, 1), [-1, self.bw_cell.units]) - else: - # : stack more RNN layer after that - # 3D Tensor [batch_size, n_steps, n_hidden] - fw_outputs = tf.reshape(tf.concat(fw_outputs, 1), [-1, total_steps, self.fw_cell.units]) - bw_outputs = tf.reshape(tf.concat(bw_outputs, 1), [-1, total_steps, self.bw_cell.units]) - - outputs = tf.concat([fw_outputs, bw_outputs], -1) - - if self.return_last_state: - return outputs, fw_states, bw_states - else: - return outputs - - -''' -class ConvRNNCell(object): - """Abstract object representing an Convolutional RNN Cell.""" - - def __call__(self, inputs, state, scope=None): - """Run this RNN cell on inputs, starting from the given state.""" - raise NotImplementedError("Abstract method") - - @property - def state_size(self): - """size(s) of state(s) used by this cell.""" - raise NotImplementedError("Abstract method") - - @property - def output_size(self): - """Integer or TensorShape: size of outputs produced by this cell.""" - raise NotImplementedError("Abstract method") - - def zero_state(self, batch_size): #, dtype=LayersConfig.tf_dtype): - """Return zero-filled state tensor(s). - Args: - batch_size: int, float, or unit Tensor representing the batch size. - Returns: - tensor of shape '[batch_size x shape[0] x shape[1] x num_features] - filled with zeros - - """ - dtype = LayersConfig.tf_dtype - shape = self.shape - num_features = self.num_features - # TODO : TypeError: 'NoneType' object is not subscriptable - zeros = tf.zeros([batch_size, shape[0], shape[1], num_features * 2], dtype=dtype) - return zeros - - -class BasicConvLSTMCell(ConvRNNCell): - """Basic Conv LSTM recurrent network cell. - - Parameters - ----------- - shape : tuple of int - The height and width of the cell. - filter_size : tuple of int - The height and width of the filter - num_features : int - The hidden size of the cell - forget_bias : float - The bias added to forget gates (see above). - input_size : int - Deprecated and unused. - state_is_tuple : boolen - If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`. - If False, they are concatenated along the column axis. The latter behavior will soon be deprecated. 
- act : activation function - The activation function of this layer, tanh as default. - - """ - - def __init__( - self, shape, filter_size, num_features, forget_bias=1.0, input_size=None, state_is_tuple=False, - act=tf.nn.tanh - ): - """Initialize the basic Conv LSTM cell.""" - # if not state_is_tuple: - # logging.warn("%s: Using a concatenated state is slower and will soon be " - # "deprecated. Use state_is_tuple=True.", self) - if input_size is not None: - logging.warn("%s: The input_size parameter is deprecated.", self) - self.shape = shape - self.filter_size = filter_size - self.num_features = num_features - self._forget_bias = forget_bias - self._state_is_tuple = state_is_tuple - self._activation = act - - @property - def state_size(self): - """State size of the LSTMStateTuple.""" - return (LSTMStateTuple(self._num_units, self._num_units) if self._state_is_tuple else 2 * self._num_units) - - @property - def output_size(self): - """Number of units in outputs.""" - return self._num_units - - def __call__(self, inputs, state, scope=None): - """Long short-term memory cell (LSTM).""" - with tf.compat.v1.variable_scope(scope or type(self).__name__): # "BasicLSTMCell" - # Parameters of gates are concatenated into one multiply for efficiency. - if self._state_is_tuple: - c, h = state - else: - # print state - # c, h = tf.split(3, 2, state) - c, h = tf.split(state, 2, 3) - concat = _conv_linear([inputs, h], self.filter_size, self.num_features * 4, True) - - # i = input_gate, j = new_input, f = forget_gate, o = output_gate - # i, j, f, o = tf.split(3, 4, concat) - i, j, f, o = tf.split(concat, 4, 3) - - new_c = (c * tf.nn.sigmoid(f + self._forget_bias) + tf.nn.sigmoid(i) * self._activation(j)) - new_h = self._activation(new_c) * tf.nn.sigmoid(o) - - if self._state_is_tuple: - new_state = LSTMStateTuple(new_c, new_h) - else: - new_state = tf.concat([new_c, new_h], 3) - return new_h, new_state - - -def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=None): - """convolution: - - Parameters - ---------- - args : tensor - 4D Tensor or a list of 4D, batch x n, Tensors. - filter_size : tuple of int - Filter height and width. - num_features : int - Nnumber of features. - bias_start : float - Starting value to initialize the bias; 0 by default. - scope : VariableScope - For the created subgraph; defaults to "Linear". - - Returns - -------- - - A 4D Tensor with shape [batch h w num_features] - - Raises - ------- - - ValueError : if some of the arguments has unspecified or wrong shape. - - """ - # Calculate the total size of arguments on dimension 1. - total_arg_size_depth = 0 - shapes = [a.get_shape().as_list() for a in args] - for shape in shapes: - if len(shape) != 4: - raise ValueError("Linear is expecting 4D arguments: %s" % str(shapes)) - if not shape[3]: - raise ValueError("Linear expects shape[4] of arguments: %s" % str(shapes)) - else: - total_arg_size_depth += shape[3] - - dtype = [a.dtype for a in args][0] - - # Now the computation. 
- with tf.compat.v1.variable_scope(scope or "Conv"): - matrix = tf.compat.v1.get_variable( - "Matrix", [filter_size[0], filter_size[1], total_arg_size_depth, num_features], dtype=dtype - ) - if len(args) == 1: - res = tf.nn.conv2d(args[0], matrix, strides=[1, 1, 1, 1], padding='SAME') - else: - res = tf.nn.conv2d(tf.concat(args, 3), matrix, strides=[1, 1, 1, 1], padding='SAME') - if not bias: - return res - bias_term = tf.compat.v1.get_variable( - "Bias", [num_features], dtype=dtype, - initializer=tf.compat.v1.initializers.constant(bias_start, dtype=dtype) - ) - return res + bias_term - - -class ConvLSTM(Layer): - """A fixed length Convolutional LSTM layer. - - See this `paper `__ . - - Parameters - ---------- - prev_layer : :class:`Layer` - Previous layer - cell_shape : tuple of int - The shape of each cell width * height - filter_size : tuple of int - The size of filter width * height - cell_fn : a convolutional RNN cell - Cell function like :class:`BasicConvLSTMCell` - feature_map : int - The number of feature map in the layer. - initializer : initializer - The initializer for initializing the parameters. - n_steps : int - The sequence length. - initial_state : None or ConvLSTM State - If None, `initial_state` is zero state. - return_last : boolean - Whether return last output or all outputs in each step. - - If True, return the last output, "Sequence input and single output". - - If False, return all outputs, "Synced sequence input and output". - - In other word, if you want to stack more RNNs on this layer, set to False. - - return_seq_2d : boolean - Only consider this argument when `return_last_output` is `False` - - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it. - - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it. - - name : str - A unique layer name. - - Attributes - ---------- - outputs : tensor - The output of this RNN. return_last_output = False, outputs = all cell_output, which is the hidden state. - cell_output.get_shape() = (?, h, w, c]) - - final_state : tensor or StateTuple - The finial state of this layer. - - When state_is_tuple = False, it is the final hidden and cell states, - - When state_is_tuple = True, You can get the final state after each iteration during training, then feed it to the initial state of next iteration. - - initial_state : tensor or StateTuple - It is the initial state of this ConvLSTM layer, you can use it to initialize - your state at the beginning of each epoch or iteration according to your - training procedure. - - batch_size : int or tensor - Is int, if able to compute the batch_size, otherwise, tensor for ``?``. - - """ - - @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release - def __init__( - self, - prev_layer, - cell_shape=None, - feature_map=1, - filter_size=(3, 3), - cell_fn=BasicConvLSTMCell, - initializer=tf.compat.v1.initializers.random_uniform(-0.1, 0.1), - n_steps=5, - initial_state=None, - return_last=False, - return_seq_2d=False, - name='convlstm', - ): - super(ConvLSTM, self).__init__(prev_layer=prev_layer, name=name) - - logging.info( - "ConvLSTM %s: feature_map: %d, n_steps: %d, " - "in_dim: %d %s, cell_fn: %s " % - (self.name, feature_map, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__) - ) - # You can get the dimension by .get_shape() or ._shape, and check the - # dimension by .with_rank() as follow. 
- # self.inputs.get_shape().with_rank(2) - # self.inputs.get_shape().with_rank(3) - - # Input dimension should be rank 5 [batch_size, n_steps(max), h, w, c] - try: - self.inputs.get_shape().with_rank(5) - except Exception: - raise Exception( - "RNN : Input dimension should be rank 5 : [batch_size, n_steps, input_x, " - "input_y, feature_map]" - ) - - fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] - - if fixed_batch_size.value: - batch_size = fixed_batch_size.value - logging.info(" RNN batch_size (concurrent processes): %d" % batch_size) - - else: - batch_size = array_ops.shape(self.inputs)[0] - logging.info(" non specified batch_size, uses a tensor instead.") - self.batch_size = batch_size - outputs = [] - self.cell = cell = cell_fn(shape=cell_shape, filter_size=filter_size, num_features=feature_map) - - if initial_state is None: - self.initial_state = cell.zero_state(batch_size, dtype=LayersConfig.tf_dtype) - else: - self.initial_state = initial_state - - state = self.initial_state - - # with tf.variable_scope("model", reuse=None, initializer=initializer): - with tf.compat.v1.variable_scope(name, initializer=initializer) as vs: - for time_step in range(n_steps): - if time_step > 0: tf.compat.v1.get_variable_scope().reuse_variables() - (cell_output, state) = cell(self.inputs[:, time_step, :, :, :], state) - outputs.append(cell_output) - - # Retrieve just the RNN variables. - # rnn_variables = [v for v in tf.all_variables() if v.name.startswith(vs.name)] - rnn_variables = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.VARIABLES, scope=vs.name) - - logging.info(" n_params : %d" % (len(rnn_variables))) - - if return_last: - # 2D Tensor [batch_size, n_hidden] - self.outputs = outputs[-1] - else: - if return_seq_2d: - # PTB tutorial: stack dense layer after that, or compute the cost from the output - # 4D Tensor [n_example, h, w, c] - self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, cell_shape[0] * cell_shape[1] * feature_map]) - else: - # : stack more RNN layer after that - # 5D Tensor [n_example/n_steps, n_steps, h, w, c] - self.outputs = tf.reshape( - tf.concat(outputs, 1), [-1, n_steps, cell_shape[0], cell_shape[1], feature_map] - ) - - self.final_state = state - - self._add_layers(self.outputs) - self._add_params(rnn_variables) - -''' - - -# @tf.function -def retrieve_seq_length_op(data): - """An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features], - it can be used when the features of padding (on right hand side) are all zeros. - - Parameters - ----------- - data : tensor - [batch_size, n_step(max), n_features] with zero padding on right hand side. - - Examples - ----------- - Single feature - - >>> data = [[[1],[2],[0],[0],[0]], - >>> [[1],[2],[3],[0],[0]], - >>> [[1],[2],[6],[1],[0]]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> length = tl.layers.retrieve_seq_length_op(data) - [2 3 4] - - Multiple features - - >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]], - >>> [[2,3],[2,4],[3,2],[0,0],[0,0]], - >>> [[3,3],[2,2],[5,3],[1,2],[0,0]]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> length = tl.layers.retrieve_seq_length_op(data) - [4 3 4] - - References - ------------ - Borrow from `TFlearn `__. 
- - """ - with tf.name_scope('GetLength'): - used = tf.sign(tf.reduce_max(input_tensor=tf.abs(data), axis=2)) - length = tf.reduce_sum(input_tensor=used, axis=1) - - return tf.cast(length, tf.int32) - - -# @tf.function -def retrieve_seq_length_op2(data): - """An op to compute the length of a sequence, from input shape of [batch_size, n_step(max)], - it can be used when the features of padding (on right hand side) are all zeros. - - Parameters - ----------- - data : tensor - [batch_size, n_step(max)] with zero padding on right hand side. - - Examples - ----------- - >>> data = [[1,2,0,0,0], - >>> [1,2,3,0,0], - >>> [1,2,6,1,0]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> length = tl.layers.retrieve_seq_length_op2(data) - tensor([2 3 4]) - - """ - return tf.reduce_sum(input_tensor=tf.cast(tf.greater(data, tf.zeros_like(data)), tf.int32), axis=1) - - -# @tf.function -def retrieve_seq_length_op3(data, pad_val=0): - """An op to compute the length of a sequence, the data shape can be [batch_size, n_step(max)] or - [batch_size, n_step(max), n_features]. - - If the data has type of tf.string and pad_val is assigned as empty string (''), this op will compute the - length of the string sequence. - - Parameters - ----------- - data : tensor - [batch_size, n_step(max)] or [batch_size, n_step(max), n_features] with zero padding on the right hand side. - pad_val: - By default 0. If the data is tf.string, please assign this as empty string ('') - - Examples - ----------- - >>> data = [[[1],[2],[0],[0],[0]], - >>> [[1],[2],[3],[0],[0]], - >>> [[1],[2],[6],[1],[0]]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> length = tl.layers.retrieve_seq_length_op3(data) - tensor([2, 3, 4]) - >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]], - >>> [[2,3],[2,4],[3,2],[0,0],[0,0]], - >>> [[3,3],[2,2],[5,3],[1,2],[0,0]]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> length = tl.layers.retrieve_seq_length_op3(data) - tensor([4, 3, 4]) - >>> data = [[1,2,0,0,0], - >>> [1,2,3,0,0], - >>> [1,2,6,1,0]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> length = tl.layers.retrieve_seq_length_op3(data) - tensor([2, 3, 4]) - >>> data = [['hello','world','','',''], - >>> ['hello','world','tensorlayer','',''], - >>> ['hello','world','tensorlayer','2.0','']] - >>> data = tf.convert_to_tensor(data, dtype=tf.string) - >>> length = tl.layers.retrieve_seq_length_op3(data, pad_val='') - tensor([2, 3, 4]) - - """ - data_shape_size = data.get_shape().ndims - if data_shape_size == 3: - return tf.reduce_sum( - input_tensor=tf.cast(tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2), dtype=tf.int32), - axis=1 - ) - elif data_shape_size == 2: - return tf.reduce_sum(input_tensor=tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), axis=1) - elif data_shape_size == 1: - raise ValueError("retrieve_seq_length_op3: data has wrong shape! Shape got ", data.get_shape().as_list()) - else: - raise ValueError( - "retrieve_seq_length_op3: handling data with num of dims %s hasn't been implemented!" % (data_shape_size) - ) - - -def target_mask_op(data, pad_val=0): - """ Return the mask of the input sequence data based on the padding values. - - Parameters - ----------- - data : tf.Tensor - A tensor with 2 or 3 dimensions. - pad_val: int, float, string, etc - The value that represent padding. By default, 0. For tf.string, you may use empty string. 
- - Examples - ----------- - >>> data = [['hello', 'world', '', '', ''], - >>> ['hello', 'world', 'tensorlayer', '', ''], - >>> ['hello', 'world', 'tensorlayer', '2.0', '']] - >>> data = tf.convert_to_tensor(data, dtype=tf.string) - >>> mask = tl.layers.target_mask_op(data, pad_val='') - >>> print(mask) - tf.Tensor( - [[1 1 0 0 0] - [1 1 1 0 0] - [1 1 1 1 0]], shape=(3, 5), dtype=int32) - >>> data = [[[1], [0], [0], [0], [0]], - >>> [[1], [2], [3], [0], [0]], - >>> [[1], [2], [0], [1], [0]]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> mask = tl.layers.target_mask_op(data) - >>> print(mask) - tf.Tensor( - [[1 0 0 0 0] - [1 1 1 0 0] - [1 1 0 1 0]], shape=(3, 5), dtype=int32) - >>> data = [[[0,0],[2,2],[1,2],[1,2],[0,0]], - >>> [[2,3],[2,4],[3,2],[1,0],[0,0]], - >>> [[3,3],[0,1],[5,3],[1,2],[0,0]]] - >>> data = tf.convert_to_tensor(data, dtype=tf.float32) - >>> mask = tl.layers.target_mask_op(data) - >>> print(mask) - tf.Tensor( - [[0 1 1 1 0] - [1 1 1 1 0] - [1 1 1 1 0]], shape=(3, 5), dtype=int32) - """ - - if not isinstance(data, tf.Tensor): - raise AttributeError("target_mask_op: the type of input data should be tf.Tensor but got %s." % type(data)) - data_shape_size = data.get_shape().ndims - if data_shape_size == 3: - return tf.cast(tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2), dtype=tf.int32) - elif data_shape_size == 2: - return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32) - elif data_shape_size == 1: - raise ValueError( - "target_mask_op: data_shape %s is not supported. " - "The shape of data should have 2 or 3 dims." % (data.get_shape()) - ) - else: - raise ValueError( - "target_mask_op: handling data_shape %s hasn't been implemented! " - "The shape of data should have 2 or 3 dims" % (data.get_shape()) - ) diff --git a/tensorlayer/optimizers/load_optimizers_backend.py b/tensorlayer/optimizers/load_optimizers_backend.py index 478f61fb5..31a905aaa 100644 --- a/tensorlayer/optimizers/load_optimizers_backend.py +++ b/tensorlayer/optimizers/load_optimizers_backend.py @@ -5,10 +5,12 @@ from tensorlayer.backend.ops.load_backend import BACKEND if BACKEND == 'tensorflow': - from .tensorflow_optimizer import * + from .tensorflow_optimizers import * elif BACKEND == 'mindspore': - from .mindspore_optimizer import * + from .mindspore_optimizers import * elif BACKEND == 'dragon': from .dragon_optimizers import * +elif BACKEND == 'paddle': + from .paddle_optimizers import * else: raise NotImplementedError("This backend is not supported") diff --git a/tensorlayer/optimizers/mindspore_optimizer.py b/tensorlayer/optimizers/mindspore_optimizers.py similarity index 100% rename from tensorlayer/optimizers/mindspore_optimizer.py rename to tensorlayer/optimizers/mindspore_optimizers.py diff --git a/tensorlayer/optimizers/paddle_optimizers.py b/tensorlayer/optimizers/paddle_optimizers.py new file mode 100644 index 000000000..cbc1c2a85 --- /dev/null +++ b/tensorlayer/optimizers/paddle_optimizers.py @@ -0,0 +1,44 @@ +#! 
/usr/bin/python +# -*- coding: utf-8 -*- + +from __future__ import absolute_import, division, print_function + +__all__ = ['Adadelta', 'Adagrad', 'Adam', 'Admax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS'] + +# Add module aliases + +# learning_rate=0.001, rho=0.95, epsilon=1e-07, name='Adadelta' +Adadelta = None + +# learning_rate=0.001, initial_accumulator_value=0.1, epsilon=1e-07,name='Adagrad' +Adagrad = None + +# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False,name='Adam' +Adam = None + +# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Adamax' +Admax = None + +# learning_rate=0.001, learning_rate_power=-0.5, initial_accumulator_value=0.1, +# l1_regularization_strength=0.0, l2_regularization_strength=0.0, name='Ftrl',l2_shrinkage_regularization_strength=0.0 +Ftrl = None + +# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam', +Nadam = None + +# learning_rate=0.001, rho=0.9, momentum=0.0, epsilon=1e-07, centered=False,name='RMSprop' +RMSprop = None + +# learning_rate=0.01, momentum=0.0, nesterov=False, name='SGD' +SGD = None + +# learning_rate, momentum, use_locking=False, name='Momentum', use_nesterov=False +Momentum = None + + +def Lamb(**kwargs): + raise Exception('Lamb optimizer function not implemented') + + +def LARS(**kwargs): + raise Exception('LARS optimizer function not implemented') diff --git a/tensorlayer/optimizers/tensorflow_optimizer.py b/tensorlayer/optimizers/tensorflow_optimizers.py similarity index 100% rename from tensorlayer/optimizers/tensorflow_optimizer.py rename to tensorlayer/optimizers/tensorflow_optimizers.py From 56653b657d6f026a0979f379c40a4f6fc51bf745 Mon Sep 17 00:00:00 2001 From: Eric Lai Date: Wed, 12 May 2021 11:33:46 +0800 Subject: [PATCH 08/36] add cost --- .../tutorial_paddle_tensorlayer_mlp.py | 24 +- tensorlayer/cost/__init__.py | 2 +- tensorlayer/cost/paddle_cost.py | 585 +++++++++++++++++- tensorlayer/models/core.py | 85 ++- 4 files changed, 651 insertions(+), 45 deletions(-) diff --git a/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py b/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py index e67477e71..ea2f46826 100644 --- a/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py +++ b/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py @@ -3,7 +3,6 @@ import os os.environ['TL_BACKEND'] = 'paddle' -import paddle.nn.functional as F from paddle.vision.transforms import Compose, Normalize import paddle @@ -36,22 +35,7 @@ def forward(self, x): train_loader = paddle.io.DataLoader(train_dataset, batch_size=64, shuffle=True) -def train(model): - model.train() - epochs = 2 - optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.trainable_weights) - for epoch in range(epochs): - for batch_id, data in enumerate(train_loader()): - x_data = data[0] - y_data = data[1] - predicts = model(x_data) - loss = F.cross_entropy(predicts, y_data) - acc = paddle.metric.accuracy(predicts, y_data) - loss.backward() - if batch_id % 50 == 0: - print("epoch: {}, batch_id: {}, loss is: {}, acc is: {}".format(epoch, batch_id, loss.numpy(), acc.numpy())) - optim.step() - optim.clear_grad() -model = MLP() -train(model) - +net = MLP() +optimizer = paddle.optimizer.Adam(learning_rate=0.001, parameters=net.trainable_weights) +model = tl.models.Model(network=net, loss_fn=tl.cost.cross_entropy, optimizer=optimizer) +model.train(n_epoch=20, train_dataset=train_loader, print_freq=5, print_train_batch=True) diff --git 
a/tensorlayer/cost/__init__.py b/tensorlayer/cost/__init__.py index 76fa2c6de..3ca7c2c81 100644 --- a/tensorlayer/cost/__init__.py +++ b/tensorlayer/cost/__init__.py @@ -10,6 +10,6 @@ elif BACKEND == 'dragon': pass elif BACKEND == 'paddle': - from .mindspore_cost import * + from .paddle_cost import * else: raise NotImplementedError("This backend is not supported") diff --git a/tensorlayer/cost/paddle_cost.py b/tensorlayer/cost/paddle_cost.py index a07f0de0d..dc0615bf1 100644 --- a/tensorlayer/cost/paddle_cost.py +++ b/tensorlayer/cost/paddle_cost.py @@ -1,2 +1,585 @@ #! /usr/bin/python -# -*- coding: utf-8 -*- \ No newline at end of file +# -*- coding: utf-8 -*- + +import paddle.nn.functional as F +import paddle as pd + +__all__ = [ + 'cross_entropy', + 'sigmoid_cross_entropy', + 'binary_cross_entropy', + 'mean_squared_error', + 'normalized_mean_square_error', + 'absolute_difference_error', + 'dice_coe', + 'dice_hard_coe', + 'iou_coe', + 'cross_entropy_seq', + 'cross_entropy_seq_with_mask', + 'cosine_similarity', + 'li_regularizer', + 'lo_regularizer', + 'maxnorm_regularizer', + 'maxnorm_o_regularizer', + 'maxnorm_i_regularizer', +] + +def cross_entropy(output, target): + """Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions, + it implements softmax internally. See ``tf.ops.sparse_softmax_cross_entropy_with_logits``. + + Parameters + ---------- + output : Tensor + A batch of distribution with shape: [batch_size, num of classes]. + target : Tensor + A batch of index with shape: [batch_size, ]. + name : string + Name of this loss. + + Examples + -------- + >>> import tensorlayer as tl + >>> ce = tl.cost.cross_entropy(y_logits, y_target_logits) + + References + ----------- + - About cross-entropy: ``__. + - The code is borrowed from: ``__. + + """ + + return F.cross_entropy(input=output, label=target) + + +def sigmoid_cross_entropy(output, target): + """Sigmoid cross-entropy operation, see ``tf.ops.sigmoid_cross_entropy_with_logits``. + + Parameters + ---------- + output : Tensor + A batch of distribution with shape: [batch_size, num of classes]. + target : Tensor + A batch of index with shape: [batch_size, ]. + name : string + Name of this loss. + + """ + + if output.shape[-1] == target.shape[-1]: + pass + else: + depth = output.shape[-1] + label = pd.fluid.layers.one_hot(target, depth=depth) + out = pd.fluid.layers.sigmoid_cross_entropy_with_logits(x=output, label=label) + out = pd.fluid.layers.reduce_mean(out) + return out + + +def binary_cross_entropy(output, target, epsilon=1e-8): + """Binary cross entropy operation. + + Parameters + ---------- + output : Tensor + Tensor with type of `float32` or `float64`. + target : Tensor + The target distribution, format the same with `output`. + epsilon : float + A small value to avoid output to be zero. + name : str + An optional name to attach to this function. + + References + ----------- + - `ericjang-DRAW `__ + + """ + + if output.shape[-1] == target.shape[-1]: + pass + else: + depth = output.shape[-1] + target = pd.fluid.layers.one_hot(target, depth=depth) + out = pd.fluid.layers.reduce_sum( + -(target * pd.log(output + epsilon) + (1. - target) * pd.log(1. - output + epsilon)) + ) + return out + + +def mean_squared_error(output, target, is_mean=False, axis=-1, name="mean_squared_error"): + """Return the TensorFlow expression of mean-square-error (L2) of two batch of data. + + Parameters + ---------- + output : Tensor + 2D, 3D or 4D tensor i.e. 
[batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel]. + target : Tensor + The target distribution, format the same with `output`. + is_mean : boolean + Whether compute the mean or sum for each example. + - If True, use ``tf.reduce_mean`` to compute the loss between one target and predict data. + - If False, use ``tf.reduce_sum`` (default). + axis : int or list of int + The dimensions to reduce. + name : str + An optional name to attach to this function. + + References + ------------ + - `Wiki Mean Squared Error `__ + + """ + + if output.shape[-1] == target.shape[-1]: + pass + else: + depth = output.shape[-1] + target = pd.fluid.layers.one_hot(target, depth=depth) + + if is_mean: + mse = F.mse_loss(input=output, label=target, reduction='mean') + else: + mse = F.mse_loss(input=output, label=target, reduction='sum') + return mse + + +def normalized_mean_square_error(output, target, axis=-1, name="normalized_mean_squared_error_loss"): + """Return the TensorFlow expression of normalized mean-square-error of two distributions. + + Parameters + ---------- + output : Tensor + 2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel]. + target : Tensor + The target distribution, format the same with `output`. + axis : int or list of int + The dimensions to reduce. + name : str + An optional name to attach to this function. + + """ + + if output.shape[-1] == target.shape[-1]: + pass + else: + depth = output.shape[-1] + target = pd.fluid.layers.one_hot(target, depth=depth) + + nmse_a = pd.sqrt(pd.fluid.layers.reduce_sum(pd.fluid.layers.square_error_cost(output, target), dim=axis)) + nmse_b = pd.sqrt(pd.fluid.layers.reduce_sum(pd.square(target), dim=axis)) + nmse = pd.fluid.layers.reduce_mean(nmse_a / nmse_b) + return nmse + + +def absolute_difference_error(output, target, is_mean=False, axis=-1, name="absolute_difference_error_loss"): + """Return the TensorFlow expression of absolute difference error (L1) of two batch of data. + + Parameters + ---------- + output : Tensor + 2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel]. + target : Tensor + The target distribution, format the same with `output`. + is_mean : boolean + Whether compute the mean or sum for each example. + - If True, use ``tf.reduce_mean`` to compute the loss between one target and predict data. + - If False, use ``tf.reduce_sum`` (default). + axis : int or list of int + The dimensions to reduce. + name : str + An optional name to attach to this function. + + """ + + + if is_mean: + loss = pd.fluid.layers.reduce_mean(pd.fluid.layers.reduce_mean(pd.abs(output - target), axis)) + else: + loss = pd.fluid.layers.reduce_mean(pd.fluid.layers.reduce_sum(pd.abs(output - target), axis)) + return loss + + +def dice_coe(output, target, loss_type='jaccard', axis=(1, 2, 3), smooth=1e-5): + """Soft dice (Sørensen or Jaccard) coefficient for comparing the similarity + of two batch of data, usually be used for binary image segmentation + i.e. labels are binary. The coefficient between 0 to 1, 1 means totally match. + + Parameters + ----------- + output : Tensor + A distribution with shape: [batch_size, ....], (any dimensions). + target : Tensor + The target distribution, format the same with `output`. + loss_type : str + ``jaccard`` or ``sorensen``, default is ``jaccard``. + axis : tuple of int + All dimensions are reduced, default ``[1,2,3]``. 
+    smooth : float
+        This small value will be added to the numerator and denominator.
+        - If both output and target are empty, it makes sure dice is 1.
+        - If either output or target are empty (all pixels are background), dice = ``smooth/(small_value + smooth)``, so if smooth is very small, dice is close to 0 (even if the image values are lower than the threshold); in this case a larger smooth yields a larger dice.
+
+    Examples
+    ---------
+    >>> import tensorlayer as tl
+    >>> outputs = tl.act.pixel_wise_softmax(outputs)
+    >>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_)
+
+    References
+    -----------
+    - `Wiki-Dice `__
+
+    """
+
+    inse = pd.fluid.layers.reduce_sum(output * target, dim=axis)
+    if loss_type == 'jaccard':
+        l = pd.fluid.layers.reduce_sum(output * output, dim=axis)
+        r = pd.fluid.layers.reduce_sum(target * target, dim=axis)
+    elif loss_type == 'sorensen':
+        l = pd.fluid.layers.reduce_sum(output, dim=axis)
+        r = pd.fluid.layers.reduce_sum(target, dim=axis)
+    else:
+        raise Exception("Unknown loss_type")
+
+    dice = (2. * inse + smooth) / (l + r + smooth)
+    dice = pd.fluid.layers.reduce_mean(dice)
+    return dice
+
+
+def dice_hard_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
+    """Non-differentiable Sørensen–Dice coefficient for comparing the similarity
+    of two batches of data, usually used for binary image segmentation, i.e. labels are binary.
+    The coefficient is between 0 and 1; 1 means a total match.
+
+    Parameters
+    -----------
+    output : tensor
+        A distribution with shape: [batch_size, ....], (any dimensions).
+    target : tensor
+        The target distribution, format the same with `output`.
+    threshold : float
+        The threshold value to be true.
+    axis : tuple of int
+        All dimensions are reduced, default ``(1,2,3)``.
+    smooth : float
+        This small value will be added to the numerator and denominator, see ``dice_coe``.
+
+    References
+    -----------
+    - `Wiki-Dice `__
+
+    """
+
+    raise NotImplementedError("Not Implemented.")
+
+
+def iou_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
+    """Non-differentiable Intersection over Union (IoU) for comparing the
+    similarity of two batches of data, usually used for evaluating binary image segmentation.
+    The coefficient is between 0 and 1, and 1 means a total match.
+
+    Parameters
+    -----------
+    output : tensor
+        A batch of distribution with shape: [batch_size, ....], (any dimensions).
+    target : tensor
+        The target distribution, format the same with `output`.
+    threshold : float
+        The threshold value to be true.
+    axis : tuple of int
+        All dimensions are reduced, default ``(1,2,3)``.
+    smooth : float
+        This small value will be added to the numerator and denominator, see ``dice_coe``.
+
+    Notes
+    ------
+    - IoU cannot be used as a training loss; people usually use the dice coefficient for training, and IoU and hard-dice for evaluation.
+
+    """
+
+    raise NotImplementedError("Not Implemented.")
+
+
+def sequence_loss_by_example(
+    logits, targets, weights, average_across_timesteps=True, softmax_loss_function=None, name=None
+):
+    """Weighted cross-entropy loss for a sequence of logits (per example). see original tensorflow code :
+    
+
+    Parameters
+    ----------
+    logits: List
+        List of 2D Tensors of shape [batch_size x num_decoder_symbols].
+    targets: List
+        List of 1D batch-sized int32 Tensors of the same length as logits.
+    weights: List
+        List of 1D batch-sized float-Tensors of the same length as logits.
+    average_across_timesteps: Boolean
+        If set, divide the returned cost by the total label weight.
+ softmax_loss_function: None or Function + Function (labels, logits) -> loss-batch to be used instead of the standard softmax (the default if this is None). + **Note that to avoid confusion, it is required for the function to accept named arguments.** + name: None or str + Optional name for this operation, default: "sequence_loss_by_example". + + Returns + ------- + 1D batch-sized float Tensor: The log-perplexity for each sequence. + + Raises + ------ + ValueError: If len(logits) is different from len(targets) or len(weights). + + """ + + raise NotImplementedError("Not Implemented.") + + +def cross_entropy_seq(logits, target_seqs, batch_size=None): + """Returns the expression of cross-entropy of two sequences, implement + softmax internally. Normally be used for fixed length RNN outputs, see `PTB example `__. + + Parameters + ---------- + logits : Tensor + 2D tensor with shape of `[batch_size * n_steps, n_classes]`. + target_seqs : Tensor + The target sequence, 2D tensor `[batch_size, n_steps]`, if the number of step is dynamic, please use ``tl.cost.cross_entropy_seq_with_mask`` instead. + batch_size : None or int. + Whether to divide the cost by batch size. + - If integer, the return cost will be divided by `batch_size`. + - If None (default), the return cost will not be divided by anything. + + Examples + -------- + >>> import tensorlayer as tl + >>> # see `PTB example `__.for more details + >>> # outputs shape : (batch_size * n_steps, n_classes) + >>> # targets shape : (batch_size, n_steps) + >>> cost = tl.cost.cross_entropy_seq(outputs, targets) + + """ + + raise NotImplementedError("Not Implemented.") + + +def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=False, name=None): + """Returns the expression of cross-entropy of two sequences, implement + softmax internally. Normally be used for Dynamic RNN with Synced sequence input and output. + + Parameters + ----------- + logits : Tensor + 2D tensor with shape of [batch_size * ?, n_classes], `?` means dynamic IDs for each example. + - Can be get from `DynamicRNNLayer` by setting ``return_seq_2d`` to `True`. + target_seqs : Tensor + int of tensor, like word ID. [batch_size, ?], `?` means dynamic IDs for each example. + input_mask : Tensor + The mask to compute loss, it has the same size with `target_seqs`, normally 0 or 1. + return_details : boolean + Whether to return detailed losses. + - If False (default), only returns the loss. + - If True, returns the loss, losses, weights and targets (see source code). + + Examples + -------- + >>> import tensorlayer as tl + >>> import tensorflow as tf + >>> import numpy as np + >>> batch_size = 64 + >>> vocab_size = 10000 + >>> embedding_size = 256 + >>> ni = tl.layers.Input([batch_size, None], dtype=tf.int64) + >>> net = tl.layers.Embedding( + ... vocabulary_size = vocab_size, + ... embedding_size = embedding_size, + ... name = 'seq_embedding')(ni) + >>> net = tl.layers.RNN( + ... cell =tf.keras.layers.LSTMCell(units=embedding_size, dropout=0.1), + ... return_seq_2d = True, + ... 
name = 'dynamicrnn')(net) + >>> net = tl.layers.Dense(n_units=vocab_size, name="output")(net) + >>> model = tl.models.Model(inputs=ni, outputs=net) + >>> input_seqs = np.random.randint(0, 10, size=(batch_size, 10), dtype=np.int64) + >>> target_seqs = np.random.randint(0, 10, size=(batch_size, 10), dtype=np.int64) + >>> input_mask = np.random.randint(0, 2, size=(batch_size, 10), dtype=np.int64) + >>> outputs = model(input_seqs, is_train=True) + >>> loss = tl.cost.cross_entropy_seq_with_mask(outputs, target_seqs, input_mask) + + """ + + raise NotImplementedError("Not Implemented.") + + +def cosine_similarity(v1, v2): + """Cosine similarity [-1, 1]. + + Parameters + ---------- + v1, v2 : Tensor + Tensor with the same shape [batch_size, n_feature]. + + References + ---------- + - `Wiki `__. + + """ + + raise NotImplementedError("Not Implemented.") + + +# Regularization Functions +def li_regularizer(scale, scope=None): + """Li regularization removes the neurons of previous layer. The `i` represents `inputs`. + Returns a function that can be used to apply group li regularization to weights. + The implementation follows `TensorFlow contrib `__. + + Parameters + ---------- + scale : float + A scalar multiplier `Tensor`. 0.0 disables the regularizer. + scope: str + An optional scope name for this function. + + Returns + -------- + A function with signature `li(weights, name=None)` that apply Li regularization. + + Raises + ------ + ValueError : if scale is outside of the range [0.0, 1.0] or if scale is not a float. + + """ + + raise NotImplementedError("Not Implemented.") + + +def lo_regularizer(scale): + """Lo regularization removes the neurons of current layer. The `o` represents `outputs` + Returns a function that can be used to apply group lo regularization to weights. + The implementation follows `TensorFlow contrib `__. + + Parameters + ---------- + scale : float + A scalar multiplier `Tensor`. 0.0 disables the regularizer. + + Returns + ------- + A function with signature `lo(weights, name=None)` that apply Lo regularization. + + Raises + ------ + ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. + + """ + + raise NotImplementedError("Not Implemented.") + + +def maxnorm_regularizer(scale=1.0): + """Max-norm regularization returns a function that can be used to apply max-norm regularization to weights. + + More about max-norm, see `wiki-max norm `_. + The implementation follows `TensorFlow contrib `__. + + Parameters + ---------- + scale : float + A scalar multiplier `Tensor`. 0.0 disables the regularizer. + + Returns + --------- + A function with signature `mn(weights, name=None)` that apply Lo regularization. + + Raises + -------- + ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. + + """ + + raise NotImplementedError("Not Implemented.") + + +def maxnorm_o_regularizer(scale): + """Max-norm output regularization removes the neurons of current layer. + Returns a function that can be used to apply max-norm regularization to each column of weight matrix. + The implementation follows `TensorFlow contrib `__. + + Parameters + ---------- + scale : float + A scalar multiplier `Tensor`. 0.0 disables the regularizer. + + Returns + --------- + A function with signature `mn_o(weights, name=None)` that apply Lo regularization. + + Raises + --------- + ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. 
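All of these regularizers are left as stubs in the Paddle port. For orientation, the TensorFlow implementation of ``maxnorm_regularizer`` boils down to ``scale * reduce_max(abs(weights))``; a hypothetical Paddle sketch along those lines (name and behaviour are an assumption, not the committed code):

    import paddle as pd

    def maxnorm_regularizer_sketch(scale=1.0):
        # mn(weights) = scale * max|w|, mirroring the TensorFlow variant
        def mn(weights, name='max_regularizer'):
            return scale * pd.max(pd.abs(weights))
        return mn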
+ + """ + + raise NotImplementedError("Not Implemented.") + + +def maxnorm_i_regularizer(scale): + """Max-norm input regularization removes the neurons of previous layer. + Returns a function that can be used to apply max-norm regularization to each row of weight matrix. + The implementation follows `TensorFlow contrib `__. + + Parameters + ---------- + scale : float + A scalar multiplier `Tensor`. 0.0 disables the regularizer. + + Returns + --------- + A function with signature `mn_i(weights, name=None)` that apply Lo regularization. + + Raises + --------- + ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. + + """ + + raise NotImplementedError("Not Implemented.") + + +def huber_loss( + output, target, is_mean=True, delta=1.0, dynamichuber=False, reverse=False, axis=-1, epsilon=0.00001, name=None +): + """Huber Loss operation, see ``https://en.wikipedia.org/wiki/Huber_loss`` . + Reverse Huber Loss operation, see ''https://statweb.stanford.edu/~owen/reports/hhu.pdf''. + Dynamic Reverse Huber Loss operation, see ''https://arxiv.org/pdf/1606.00373.pdf''. + + Parameters + ---------- + output : Tensor + A distribution with shape: [batch_size, ....], (any dimensions). + target : Tensor + The target distribution, format the same with `output`. + is_mean : boolean + Whether compute the mean or sum for each example. + - If True, use ``tf.reduce_mean`` to compute the loss between one target and predict data (default). + - If False, use ``tf.reduce_sum``. + delta: float + The point where the huber loss function changes from a quadratic to linear. + dynamichuber: boolean + Whether compute the coefficient c for each batch. + - If True, c is 20% of the maximal per-batch error. + - If False, c is delta. + reverse: boolean + Whether compute the reverse huber loss. + axis : int or list of int + The dimensions to reduce. + epsilon: + Eplison. + name : string + Name of this loss. + + """ + + raise NotImplementedError("Not Implemented.") diff --git a/tensorlayer/models/core.py b/tensorlayer/models/core.py index 15b54395b..7d760087a 100644 --- a/tensorlayer/models/core.py +++ b/tensorlayer/models/core.py @@ -21,6 +21,8 @@ # from mindspore.train.parallel_utils import ParallelMode from mindspore.nn.wrap import DistributedGradReducer from mindspore.common import ParameterTuple +if tl.BACKEND == 'paddle': + import paddle as pd class Model: @@ -35,28 +37,7 @@ class Model: network should contain the logic of loss and grads calculation, and the logic of parallel if needed. Default: None. optimizer : Optimizer for updating the weights. Default: None. - metrics (Union[dict, set]): Dict or set of metrics to be evaluated by the model during - training and testing. eg: {'accuracy', 'recall'}. Default: None. - eval_network (Cell): Network for evaluation. If not defined, `network` and `loss_fn` would be wrapped as - `eval_network`. Default: None. - eval_indexes (list): In case of defining the `eval_network`, if `eval_indexes` is None, all outputs of - `eval_network` would be passed to metrics, otherwise `eval_indexes` must contain three - elements, representing the positions of loss value, predict value and label, the loss - value would be passed to `Loss` metric, predict value and label would be passed to other - metric. Default: None. - amp_level (str): Option for argument `level` in `mindspore.amp.build_train_network`, level for mixed - precision training. Supports [O0, O2, O3]. Default: "O0". - - - O0: Do not change. 
-          - O2: Cast network to float16, keep batchnorm run in float32, using dynamic loss scale.
-          - O3: Cast network to float16, with additional property 'keep_batchnorm_fp32=False'.
-
-          O2 is recommended on GPU, O3 is recommended on Ascend.
-
-        loss_scale_manager (Union[None, LossScaleManager]): If None, not scale the loss, or else
-            scale the loss by LossScaleManager. If it is set, overwrite the level setting. It's a eyword argument.
-            e.g. Use `loss_scale_manager=None` to set the value.
-        keep_batchnorm_fp32 (bool): Keep Batchnorm run in `float32`. If set, overwrite the level setting. Default: True.
+        metrics : Dict or set of metrics to be evaluated by the model during training and testing. Default: None.
 
     Examples:
         >>> import tensorlayer as tl
@@ -84,7 +65,7 @@ class Model:
     """
 
     def __init__(
-        self, network, loss_fn=None, optimizer=None, metrics=None, eval_network=None, eval_indexes=None, amp_level="O0",
+        self, network, loss_fn=None, optimizer=None, metrics=None, **kwargs
     ):
         self.network = network
@@ -110,6 +91,12 @@ def train(self, n_epoch, train_dataset=None, test_dataset=False, print_train_bat
                 train_weights=self.train_weights, optimizer=self.optimizer, metrics=self.metrics,
                 print_train_batch=print_train_batch, print_freq=print_freq, test_dataset=test_dataset
             )
+        elif tl.BACKEND == 'paddle':
+            self.pd_train(
+                n_epoch=n_epoch, train_dataset=train_dataset, network=self.network, loss_fn=self.loss_fn,
+                train_weights=self.train_weights, optimizer=self.optimizer, metrics=self.metrics,
+                print_train_batch=print_train_batch, print_freq=print_freq, test_dataset=test_dataset
+            )
 
     def eval(self, test_dataset):
         self.network.eval()
@@ -364,6 +351,58 @@ def ms_train(
             print("   val acc:  {}".format(val_acc / n_iter))
 
+    def pd_train(
+        self, n_epoch, train_dataset, network, loss_fn, train_weights, optimizer, metrics, print_train_batch,
+        print_freq, test_dataset
+    ):
+        for epoch in range(n_epoch):
+            start_time = time.time()
+
+            train_loss, train_acc, n_iter = 0, 0, 0
+            for X_batch, y_batch in train_dataset:
+                network.set_train()
+
+                output = network(X_batch)
+                loss = loss_fn(output, y_batch)
+                loss_ce = loss.numpy()
+                loss.backward()
+                optimizer.step()
+                optimizer.clear_grad()
+
+                train_loss += loss_ce
+                if metrics:
+                    train_acc += metrics(output, y_batch)
+                else:
+                    train_acc += pd.metric.accuracy(output, y_batch)
+                n_iter += 1
+
+                if print_train_batch:
+                    print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
+                    print("   train loss: {}".format(train_loss / n_iter))
+                    print("   train acc:  {}".format(train_acc.numpy() / n_iter))
+
+            if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
+                print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
+                print("   train loss: {}".format(train_loss / n_iter))
+                print("   train acc:  {}".format(train_acc.numpy() / n_iter))
+
+            if test_dataset:
+                # use training and evaluation sets to evaluate the model every print_freq epoch
+                if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
+                    network.eval()
+                    val_loss, val_acc, n_iter = 0, 0, 0
+                    for X_batch, y_batch in test_dataset:
+                        _logits = network(X_batch)  # is_train=False, disable dropout
+                        val_loss += loss_fn(_logits, y_batch)  # paddle losses here take (output, target) only
+                        if metrics:
+                            val_acc += metrics(_logits, y_batch)
+                        else:
+                            val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
+                        n_iter += 1
+                    print("   val loss: {}".format(val_loss / n_iter))
+                    print("   val acc:  {}".format(val_acc / n_iter))
+
+
 class WithLoss(Module):
 
     def __init__(self, backbone, loss_fn):

From 556ca850c9e9f5554e3d8166ba0271e8aa5d6190 Mon Sep 17 00:00:00 2001
From: hanjr Date: Wed, 12 May 2021 15:53:54 +0800 Subject: [PATCH 09/36] update --- .../tutorial_paddle_tensorlayer_mlp.py | 22 +- tensorlayer/__init__.py | 1 + tensorlayer/backend/ops/__init__.py | 12 +- tensorlayer/backend/ops/dragon_backend.py | 14 +- tensorlayer/backend/ops/mindspore_backend.py | 23 +- tensorlayer/backend/ops/mindspore_nn.py | 380 +++- tensorlayer/backend/ops/paddle_backend.py | 19 +- tensorlayer/backend/ops/paddle_nn.py | 104 ++ tensorlayer/backend/ops/tensorflow_backend.py | 14 +- tensorlayer/backend/ops/tensorflow_nn.py | 329 +++- tensorlayer/dataflow/__init__.py | 10 +- tensorlayer/dataflow/image/__init__.py | 15 + tensorlayer/dataflow/image/mindspore_image.py | 1539 +++++++++++++++++ tensorlayer/dataflow/image/paddle_image.py | 19 + .../dataflow/image/tensorflow_image.py | 760 ++++++++ tensorlayer/dataflow/mindspore_data.py | 14 +- tensorlayer/dataflow/mindspore_image.py | 305 ---- tensorlayer/dataflow/paddle_data.py | 131 ++ tensorlayer/dataflow/tensorflow_data.py | 12 + tensorlayer/dataflow/tensorflow_image.py | 200 --- tensorlayer/layers/convolution/__init__.py | 18 +- tensorlayer/layers/convolution/binary_conv.py | 155 ++ tensorlayer/layers/convolution/dorefa_conv.py | 168 ++ tensorlayer/layers/convolution/group_conv.py | 164 ++ .../layers/convolution/separable_conv.py | 319 ++++ tensorlayer/layers/pooling.py | 359 +++- tensorlayer/metric/__init__.py | 15 + tensorlayer/metric/mindspore_metric.py | 88 + tensorlayer/metric/paddle_metric.py | 89 + tensorlayer/metric/tensorflow_metric.py | 98 ++ tensorlayer/models/core.py | 43 +- tensorlayer/optimizers/__init__.py | 4 +- .../optimizers/mindspore_optimizers.py | 6 +- tensorlayer/optimizers/paddle_optimizers.py | 355 +++- .../optimizers/tensorflow_optimizers.py | 4 +- tests/dataflow/__init__.py | 0 tests/dataflow/test_dataflow_image.py | 279 +++ tests/layers/test_layers_convolution.py | 99 +- tests/layers/test_layers_pooling.py | 34 +- 39 files changed, 5596 insertions(+), 624 deletions(-) create mode 100644 tensorlayer/dataflow/image/mindspore_image.py create mode 100644 tensorlayer/dataflow/image/paddle_image.py create mode 100644 tensorlayer/dataflow/image/tensorflow_image.py delete mode 100644 tensorlayer/dataflow/mindspore_image.py create mode 100644 tensorlayer/dataflow/paddle_data.py delete mode 100644 tensorlayer/dataflow/tensorflow_image.py create mode 100644 tensorlayer/layers/convolution/binary_conv.py create mode 100644 tensorlayer/layers/convolution/dorefa_conv.py create mode 100644 tensorlayer/layers/convolution/group_conv.py create mode 100644 tensorlayer/layers/convolution/separable_conv.py create mode 100644 tensorlayer/metric/__init__.py create mode 100644 tensorlayer/metric/mindspore_metric.py create mode 100644 tensorlayer/metric/paddle_metric.py create mode 100644 tensorlayer/metric/tensorflow_metric.py create mode 100644 tests/dataflow/__init__.py create mode 100644 tests/dataflow/test_dataflow_image.py diff --git a/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py b/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py index ea2f46826..274f6be11 100644 --- a/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py +++ b/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py @@ -3,22 +3,19 @@ import os os.environ['TL_BACKEND'] = 'paddle' -from paddle.vision.transforms import Compose, Normalize -import paddle - import tensorlayer as tl from tensorlayer.layers import Module from tensorlayer.layers import Dense, Flatten -transform = Compose([Normalize(mean=[127.5], - 
std=[127.5], - data_format='CHW')]) print('download training data and load training data') -train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform) -test_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform) + +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) + print('load finished') + class MLP(Module): + def __init__(self): super(MLP, self).__init__() self.linear1 = Dense(n_units=120, in_channels=784, act=tl.ReLU) @@ -33,9 +30,12 @@ def forward(self, x): x = self.linear3(x) return x -train_loader = paddle.io.DataLoader(train_dataset, batch_size=64, shuffle=True) +traindataset = tl.dataflow.FromSlices((X_train, y_train)) +train_loader = tl.dataflow.Dataloader(traindataset, batch_size=64, shuffle=True) net = MLP() -optimizer = paddle.optimizer.Adam(learning_rate=0.001, parameters=net.trainable_weights) -model = tl.models.Model(network=net, loss_fn=tl.cost.cross_entropy, optimizer=optimizer) + +optimizer = tl.optimizers.Adam(learning_rate=0.001) +metric = tl.metric.Accuracy() +model = tl.models.Model(network=net, loss_fn=tl.cost.cross_entropy, optimizer=optimizer, metrics=metric) model.train(n_epoch=20, train_dataset=train_loader, print_freq=5, print_train_batch=True) diff --git a/tensorlayer/__init__.py b/tensorlayer/__init__.py index b111a3edb..442dce1f5 100644 --- a/tensorlayer/__init__.py +++ b/tensorlayer/__init__.py @@ -50,6 +50,7 @@ from tensorlayer import rein from tensorlayer import utils from tensorlayer import dataflow + from tensorlayer import metric from tensorlayer.lazy_imports import LazyImport diff --git a/tensorlayer/backend/ops/__init__.py b/tensorlayer/backend/ops/__init__.py index 5b8a61b4e..96277aefa 100644 --- a/tensorlayer/backend/ops/__init__.py +++ b/tensorlayer/backend/ops/__init__.py @@ -30,6 +30,9 @@ from .load_backend import Conv1d_transpose from .load_backend import Conv2d_transpose from .load_backend import Conv3d_transpose +from .load_backend import GroupConv2D +from .load_backend import BinaryConv2D +from .load_backend import DorefaConv2D from .load_backend import ReLU from .load_backend import ReLU6 @@ -47,6 +50,14 @@ from .load_backend import Dropout from .load_backend import BatchNorm from .load_backend import DepthwiseConv2d +from .load_backend import SeparableConv1D +from .load_backend import SeparableConv2D +from .load_backend import AdaptiveMeanPool1D +from .load_backend import AdaptiveMeanPool2D +from .load_backend import AdaptiveMeanPool3D +from .load_backend import AdaptiveMaxPool1D +from .load_backend import AdaptiveMaxPool2D +from .load_backend import AdaptiveMaxPool3D # load ops from .load_backend import Variable @@ -123,4 +134,3 @@ from .load_backend import Meshgrid from .load_backend import BatchToSpace from .load_backend import DepthToSpace - diff --git a/tensorlayer/backend/ops/dragon_backend.py b/tensorlayer/backend/ops/dragon_backend.py index 37e6e5aa5..e62f27e84 100644 --- a/tensorlayer/backend/ops/dragon_backend.py +++ b/tensorlayer/backend/ops/dragon_backend.py @@ -548,7 +548,9 @@ def reduce_min(input_tensor, axis=None): """ return D.min(input_tensor, axis) + class Pad(object): + def __init__(self, paddings, mode="REFLECT"): if mode not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']: raise Exception("Unsupported mode: {}".format(mode)) @@ -561,6 +563,7 @@ def __call__(self, x): outputs = D.pad(x, pads=self.paddings, mode=self.mode, value=0) return outputs + def pad(tensor, paddings, mode='CONSTANT', constant_values=0): """ Pads a tensor. 
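Across the backends, ``paddings`` follows the TensorFlow convention of one ``[before, after]`` pair per dimension; the sketch below assumes that convention also holds for the Dragon wrapper (shapes illustrative):

    # x has shape [2, 3]; pad one row on each side and two columns on each side
    y = pad(x, paddings=[[1, 1], [2, 2]], mode='CONSTANT', constant_values=0)
    # y has shape [2 + 1 + 1, 3 + 2 + 2] = [4, 7]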
@@ -627,6 +630,7 @@ def stack(values, axis=0): class Meshgrid(object): + def __init__(self, indexing='xy'): super(Meshgrid, self).__init__() self.index = indexing @@ -947,7 +951,6 @@ def __call__(self, *args, **kwargs): pass - class Resize: def __init__(self, scale, method, antialias=False, data_format='channels_last', ksize=None): @@ -1010,19 +1013,25 @@ def __init__(self): def __call__(self, x): return D.math.sign(x) + def ceil(x): raise NotImplementedError + def multiply(x, y): raise NotImplementedError + def divide(x, y): raise NotImplementedError + def identity(x): raise NotImplementedError + class BatchToSpace(object): + def __init__(self, block_size, crops): super(BatchToSpace, self).__init__() pass @@ -1032,8 +1041,9 @@ def __call__(self, input_x): class DepthToSpace(object): + def __init__(self, block_size, data_format='NHWC'): pass def __call__(self, input): - raise NotImplementedError \ No newline at end of file + raise NotImplementedError diff --git a/tensorlayer/backend/ops/mindspore_backend.py b/tensorlayer/backend/ops/mindspore_backend.py index d067be2f3..b602a4b8d 100644 --- a/tensorlayer/backend/ops/mindspore_backend.py +++ b/tensorlayer/backend/ops/mindspore_backend.py @@ -221,6 +221,7 @@ def _initialize(self, arr): class RandomNormal(Cell): + def __init__(self, mean=0.0, stddev=0.01, seed=None): super(RandomNormal, self).__init__() self.normal = Normal(mean=mean, stddev=stddev, seed=seed) @@ -711,7 +712,9 @@ def reduce_min(input_tensor, axis=None): outputs = Rmin_obj(input_tensor, axis) return outputs + class Pad(Cell): + def __init__(self, paddings, mode="REFLECT"): super(Pad, self).__init__() if mode not in ["REFLECT", "SYMMETRIC"]: @@ -722,6 +725,7 @@ def __init__(self, paddings, mode="REFLECT"): def construct(self, x): return self.pad(x, self.paddings) + def pad(tensor, paddings, mode='CONSTANT', constant_values=0): """ Pads a tensor. @@ -745,6 +749,7 @@ def pad(tensor, paddings, mode='CONSTANT', constant_values=0): class Unstack(Cell): + def __init__(self, axis, num=None): super(Unstack, self).__init__() if num is not None: @@ -756,6 +761,7 @@ def construct(self, values): class Stack(Cell): + def __init__(self, axis=0): super(Stack, self).__init__() self.stack = P.Pack(axis=axis) @@ -785,6 +791,7 @@ def stack(values, axis=0): class Meshgrid(Cell): + def __init__(self, indexing='xy'): super(Meshgrid, self).__init__() self._meshgrid = P.Meshgrid(indexing=indexing) @@ -794,7 +801,6 @@ def construct(self, *args): return self._meshgrid(inputs) - def meshgrid(*args, **kwargs): """ Broadcasts parameters for evaluation on an N-D grid. @@ -815,7 +821,6 @@ def meshgrid(*args, **kwargs): return _meshgrid(*args) - def range(start, limit=None, delta=1, dtype=None): """ Creates a sequence of numbers. @@ -885,7 +890,6 @@ def construct(self, input, multiples): return self.tile(input, tuple(multiples)) - def tile(input, multiples): """ Constructs a tensor by tiling a given tensor. 
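A quick sanity check of the ``tile`` contract, using NumPy's identically named op (illustrative; the MindSpore wrapper above delegates the same semantics to the backend tile primitive):

    import numpy as np

    x = np.array([[1, 2],
                  [3, 4]])
    y = np.tile(x, (2, 3))   # repeat rows twice and columns three times
    print(y.shape)           # (4, 6)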
@@ -1156,6 +1160,7 @@ def resize(inputs, output_size, method, antialias): class ZeroPadding1D(Cell): + def __init__(self, padding): super(ZeroPadding1D, self).__init__() if np.size(padding) == 2: @@ -1168,6 +1173,7 @@ def construct(self, inputs): class ZeroPadding2D(Cell): + def __init__(self, padding): super(ZeroPadding2D, self).__init__() if np.size(padding) == 4: @@ -1180,6 +1186,7 @@ def construct(self, inputs): class ZeroPadding3D(Cell): + def __init__(self, padding): super(ZeroPadding3D, self).__init__() if np.size(padding) == 6: @@ -1200,20 +1207,26 @@ def __init__(self): def construct(self, x): return self.sign(x) + def ceil(x): _ceil = P.Ceil() return _ceil(x) + def multiply(x, y): raise NotImplementedError + def divide(x, y): raise NotImplementedError + def identity(x): raise NotImplementedError + class BatchToSpace(Cell): + def __init__(self, block_size, crops): super(BatchToSpace, self).__init__() self.batch_to_space = P.BatchToSpace(block_size=block_size, crops=crops) @@ -1221,7 +1234,9 @@ def __init__(self, block_size, crops): def __call__(self, input_x): return self.batch_to_space(input_x) + class DepthToSpace(Cell): + def __init__(self, block_size, data_format='NHWC'): super(DepthToSpace, self).__init__() self.data_format = data_format @@ -1236,4 +1251,4 @@ def __call__(self, input): if self.data_format == 'NHWC': output = nchw_to_nhwc(output) - return output \ No newline at end of file + return output diff --git a/tensorlayer/backend/ops/mindspore_nn.py b/tensorlayer/backend/ops/mindspore_nn.py index 3af443081..6e6619ef5 100644 --- a/tensorlayer/backend/ops/mindspore_nn.py +++ b/tensorlayer/backend/ops/mindspore_nn.py @@ -6,13 +6,13 @@ from mindspore.nn.cell import Cell from mindspore import context import mindspore as ms -from mindspore.ops import operations as P +import mindspore.ops as P from mindspore.ops import functional as F from mindspore.communication.management import get_group_size, get_rank from mindspore.communication import management from mindspore._checkparam import check_int_positive from mindspore._extends import cell_attr_register - +from mindspore.ops._grad.grad_base import bprop_getters def padding_format(padding): @@ -876,7 +876,6 @@ def pool(input, window_shape, pooling_type, strides=None, padding='VALID', data_ pass - class DepthwiseConv2d(Cell): def __init__(self, strides, padding, data_format=None, dilations=None, ksize=None, channel_multiplier=1): @@ -1138,7 +1137,6 @@ def conv3d_transpose( pass - class BatchNorm(Cell): """Batch Normalization base class.""" @@ -1321,3 +1319,377 @@ def construct(self, inputs): if self.data_format == 'channels_last' and self.get_dim(x) == '2d': y = nchw_to_nhwc(y) return y + + +class GroupConv2D(Cell): + + def __init__(self, strides, padding, data_format, dilations, out_channel, k_size, groups): + super(GroupConv2D, self).__init__() + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + + if self.data_format is 'NHWC': + self.ms_stride = strides[1] + self.ms_dilation = dilations[1] + + elif self.data_format is 'NCHW': + self.ms_stride = strides[2] + self.ms_dilation = dilations[2] + + self.conv2d = P.Conv2D( + out_channel=out_channel, kernel_size=k_size, pad_mode=self.padding, stride=self.ms_stride, + dilation=self.ms_dilation, mode=1, group=groups + ) + + def construct(self, inputs, filters): + if self.data_format == 'NHWC': + inputs = nhwc_to_nchw(inputs) + + outputs = self.conv2d(inputs, filters) + + if self.data_format == 'NHWC': + outputs = nchw_to_nhwc(outputs) + return outputs + + 
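The MindSpore cell above gets grouping for free from ``P.Conv2D(group=...)``; the TensorFlow backend later in this patch emulates it with a split/convolve/concat pattern. A self-contained sketch of that emulation (shapes are illustrative assumptions):

    import tensorflow as tf

    def group_conv2d(x, w, groups, strides=1, padding='SAME'):
        # x: [N, H, W, Cin], w: [kh, kw, Cin // groups, Cout]
        xs = tf.split(x, groups, axis=3)
        ws = tf.split(w, groups, axis=3)   # Cout is split across groups
        ys = [tf.nn.conv2d(xi, wi, strides=strides, padding=padding)
              for xi, wi in zip(xs, ws)]
        return tf.concat(ys, axis=3)

    x = tf.random.normal([1, 8, 8, 32])
    w = tf.random.normal([3, 3, 8, 64])    # filter depth 32 / 4 = 8
    y = group_conv2d(x, w, groups=4)       # -> [1, 8, 8, 64]

Each group sees ``in_channels / groups`` input channels and produces ``out_channels / groups`` outputs, which is why the filter depth shrinks from 32 to 8 in this example.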
+class SeparableConv1D(Cell): + + def __init__(self, stride, padding, data_format, dilations, out_channel, k_size, in_channel, depth_multiplier): + super(SeparableConv1D, self).__init__() + self.data_format, self.padding = preprocess_1d_format(data_format, padding) + self.stride = (1, stride) + self.dilations = (1, dilations) + self.k_size = (1, k_size) + self.out_channel = out_channel + self.in_channel = in_channel + self.depth_multiplier = depth_multiplier + self.depthwise_conv = P.Conv2D( + out_channel=self.in_channel * self.depth_multiplier, kernel_size=self.k_size, pad_mode=self.padding, + stride=self.stride, dilation=self.dilations, mode=1, group=self.in_channel + ) + + self.pointwise_conv = P.Conv2D( + out_channel=self.out_channel, kernel_size=(1, 1), pad_mode=self.padding, stride=(1, 1), dilation=(1, 1), + mode=1, group=1 + ) + + self.expand_dims = P.ExpandDims() + self.squeeze = P.Squeeze(2) + + def construct(self, x, depthwise_filters, pointwise_filters): + + if self.data_format == 'NWC': + x = nhwc_to_nchw(x) + + x = self.expand_dims(x, 2) + depthwise_filters = self.expand_dims(depthwise_filters, 2) + pointwise_filters = self.expand_dims(pointwise_filters, 2) + + outputs = self.depthwise_conv(x, depthwise_filters) + outputs = self.pointwise_conv(outputs, pointwise_filters) + + outputs = self.squeeze(outputs) + + if self.data_format == 'NWC': + outputs = nchw_to_nhwc(outputs) + return outputs + + +class SeparableConv2D(Cell): + + def __init__(self, strides, padding, data_format, dilations, out_channel, k_size, in_channel, depth_multiplier): + super(SeparableConv2D, self).__init__() + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + self.k_size = k_size + self.out_channel = out_channel + self.in_channel = in_channel + self.depth_multiplier = depth_multiplier + + if self.data_format is 'NHWC': + self.ms_stride = strides[1] + self.ms_dilation = dilations[1] + # self.transpose = P.Transpose() + elif self.data_format is 'NCHW': + self.ms_stride = strides[2] + self.ms_dilation = dilations[2] + + self.depthwise_conv = P.Conv2D( + out_channel=self.in_channel * self.depth_multiplier, kernel_size=self.k_size, pad_mode=self.padding, + stride=self.ms_stride, dilation=self.ms_dilation, mode=1, group=self.in_channel + ) + + self.pointwise_conv = P.Conv2D( + out_channel=self.out_channel, kernel_size=(1, 1), pad_mode=self.padding, stride=(1, 1), dilation=(1, 1), + mode=1, group=1 + ) + + def construct(self, x, depthwise_filters, pointwise_filters): + if self.data_format == 'NHWC': + x = nhwc_to_nchw(x) + + outputs = self.depthwise_conv(x, depthwise_filters) + outputs = self.pointwise_conv(outputs, pointwise_filters) + + if self.data_format == 'NHWC': + outputs = nchw_to_nhwc(outputs) + return outputs + + +class AdaptiveMeanPool1D(Cell): + + def __init__(self, output_size, data_format): + super(AdaptiveMeanPool1D, self).__init__() + self.data_format, _ = preprocess_1d_format(data_format, None) + self.output_size = output_size + self.expand_dims = P.ExpandDims() + self.squeeze = P.Squeeze(2) + + def construct(self, inputs): + + if self.data_format == 'NWC': + n, w, c = inputs.shape + inputs = nhwc_to_nchw(inputs) + else: + n, c, w = inputs.shape + inputs = self.expand_dims(inputs, 2) + + stride = (1, w // self.output_size) + kernel = (1, w - (self.output_size - 1) * stride[1]) + outputs = P.AvgPool(kernel_size=kernel, strides=stride, pad_mode='VALID')(inputs) + outputs = self.squeeze(outputs) + + if self.data_format == 'NWC': + outputs = nchw_to_nhwc(outputs) + + 
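+        # Worked example of the window arithmetic above: for w = 10 and
+        # output_size = 3, stride = 10 // 3 = 3 and kernel = 10 - 2 * 3 = 4,
+        # so pooling windows [0:4], [3:7], [6:10] cover every input element
+        # and produce exactly `output_size` outputs.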
return outputs + + +class AdaptiveMeanPool2D(Cell): + + def __init__(self, output_size, data_format): + super(AdaptiveMeanPool2D, self).__init__() + self.data_format, _ = preprocess_2d_format(data_format, None) + self.output_size = output_size + + def construct(self, inputs): + + if self.data_format == 'NHWC': + n, h, w, c = inputs.shape + inputs = nhwc_to_nchw(inputs) + else: + n, c, h, w = inputs.shape + + out_h, out_w = self.output_size + stride_h = h // out_h + kernel_h = h - (out_h - 1) * stride_h + stride_w = w // out_w + kernel_w = w - (out_w - 1) * stride_w + outputs = P.AvgPool(kernel_size=(kernel_h, kernel_w), strides=(stride_h, stride_w), pad_mode='VALID')(inputs) + + if self.data_format == 'NHWC': + outputs = nchw_to_nhwc(outputs) + + return outputs + + +class AdaptiveMeanPool3D(Cell): + + pass + + +class AdaptiveMaxPool1D(Cell): + + def __init__(self, output_size, data_format): + super(AdaptiveMaxPool1D, self).__init__() + self.data_format, _ = preprocess_1d_format(data_format, None) + self.output_size = output_size + self.expand_dims = P.ExpandDims() + self.squeeze = P.Squeeze(2) + + def construct(self, inputs): + + if self.data_format == 'NWC': + n, w, c = inputs.shape + inputs = nhwc_to_nchw(inputs) + else: + n, c, w = inputs.shape + inputs = self.expand_dims(inputs, 2) + + stride = (1, w // self.output_size) + kernel = (1, w - (self.output_size - 1) * stride[1]) + outputs = P.MaxPool(kernel_size=kernel, strides=stride, pad_mode='VALID')(inputs) + outputs = self.squeeze(outputs) + + if self.data_format == 'NWC': + outputs = nchw_to_nhwc(outputs) + + return outputs + + +class AdaptiveMaxPool2D(Cell): + + def __init__(self, output_size, data_format): + super(AdaptiveMaxPool2D, self).__init__() + self.data_format, _ = preprocess_2d_format(data_format, None) + self.output_size = output_size + + def construct(self, inputs): + + if self.data_format == 'NHWC': + n, h, w, c = inputs.shape + inputs = nhwc_to_nchw(inputs) + else: + n, c, h, w = inputs.shape + + out_h, out_w = self.output_size + stride_h = h // out_h + kernel_h = h - (out_h - 1) * stride_h + stride_w = w // out_w + kernel_w = w - (out_w - 1) * stride_w + outputs = P.MaxPool(kernel_size=(kernel_h, kernel_w), strides=(stride_h, stride_w), pad_mode='VALID')(inputs) + + if self.data_format == 'NHWC': + outputs = nchw_to_nhwc(outputs) + + return outputs + + +class AdaptiveMaxPool3D(Cell): + + pass + + +class BinaryConv2D(Cell): + + def __init__(self, strides, padding, data_format, dilations, out_channel, k_size, in_channel): + super(BinaryConv2D, self).__init__() + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + if self.data_format is 'NHWC': + self.ms_stride = strides[1] + self.ms_dilation = dilations[1] + # self.transpose = P.Transpose() + elif self.data_format is 'NCHW': + self.ms_stride = strides[2] + self.ms_dilation = dilations[2] + + self.conv2d = P.Conv2D( + out_channel=out_channel, kernel_size=k_size, pad_mode=self.padding, stride=self.ms_stride, + dilation=self.ms_dilation, mode=1, group=1 + ) + + @bprop_getters.register(P.Sign) + def get_bprop_Sign(self): + + def bprop(x, out, dout): + + grad = P.clip_by_value(dout, -1, 1) + return (grad, ) + + return bprop + + self.sign = P.Sign() + + def construct(self, inputs, filters): + + if self.data_format == 'NHWC': + inputs = nhwc_to_nchw(inputs) + + filters = self.sign(filters) + + outputs = self.conv2d(inputs, filters) + + if self.data_format == 'NHWC': + outputs = nchw_to_nhwc(outputs) + + return outputs + + +class DorefaConv2D(Cell): + 
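+    # DoReFa-Net quantization in brief: activations are first clipped into
+    # [0, 1] by cabs(x) = min(|x|, 1); weights are squashed into [0, 1] via
+    # w * 0.5 + 0.5 (or sign-quantized when bitW == 1). Both are then rounded
+    # onto a uniform grid: quantize_k(x) = round(x * (2**k - 1)) / (2**k - 1).
+    # The bprops registered in __init__ pass gradients straight through
+    # round() and sign(), keeping the quantized graph trainable.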
+ def __init__(self, bitW, bitA, strides, padding, data_format, dilations, out_channel, k_size, in_channel): + super(DorefaConv2D, self).__init__() + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + self.bitW = ms.Tensor(bitW) + self.bitA = ms.Tensor(bitA) + if self.data_format is 'NHWC': + self.ms_stride = strides[1] + self.ms_dilation = dilations[1] + # self.transpose = P.Transpose() + elif self.data_format is 'NCHW': + self.ms_stride = strides[2] + self.ms_dilation = dilations[2] + + self.conv2d = P.Conv2D( + out_channel=out_channel, kernel_size=k_size, pad_mode=self.padding, stride=self.ms_stride, + dilation=self.ms_dilation, mode=1, group=1 + ) + + @bprop_getters.register(P.Round) + def get_bprop_Round(self): + + def bprop(x, out, dout): + + return (dout, ) + + return bprop + + @bprop_getters.register(P.Sign) + def get_bprop_Sign(self): + + def bprop(x, out, dout): + + return (dout, ) + + return bprop + + self.mimimum = P.Minimum() + self.abs = P.Abs() + self.round = P.Round() + self.reducemean = P.ReduceMean() + self.sign = P.Sign() + self.pow = P.Pow() + self.sub = P.Sub() + self.oneslike = P.OnesLike() + + def cabs(self, inputs): + + a = P.stop_gradient(self.oneslike(inputs)) + return self.mimimum(self.abs(inputs), a) + + def _quantize_dorefa(self, x, k): + + n = self.sub(self.pow(2.0, k), 1) + return self.round(x * n) / n + + def quantize_active(self, x, bitA): + if bitA == 32: + return x + return self._quantize_dorefa(x, bitA) + + def quantize_weight(self, x, bitW, force_quantization=False): + + if bitW == 32 and not force_quantization: + return x + + if bitW == 1: + E = P.stop_gradient(self.reducemean(self.abs(x))) + return self.sign(x / E) * E + + x = P.clip_by_value(x * 0.5 + 0.5, 0.0, 1.0) + + return 2 * self._quantize_dorefa(x, bitW) - 1 + + def construct(self, inputs, filters): + + if self.data_format == 'NHWC': + inputs = nhwc_to_nchw(inputs) + + inputs = self.quantize_active(self.cabs(inputs), self.bitA) + + filters = self.quantize_weight(filters, self.bitW) + + outputs = self.conv2d(inputs, filters) + + if self.data_format == 'NHWC': + outputs = nchw_to_nhwc(outputs) + + return outputs diff --git a/tensorlayer/backend/ops/paddle_backend.py b/tensorlayer/backend/ops/paddle_backend.py index e9b37c562..f7334c0bc 100644 --- a/tensorlayer/backend/ops/paddle_backend.py +++ b/tensorlayer/backend/ops/paddle_backend.py @@ -20,6 +20,7 @@ uint32 = "uint32" uint64 = "uint64" + def _getter(init_fn, **kwargs): """Return an named eager tensor.""" raise NotImplementedError @@ -272,6 +273,7 @@ def dtypes(dt): class Maximum(object): + def __init__(self): pass @@ -280,6 +282,7 @@ def __call__(self, x, y): class Minimum(object): + def __init__(self): pass @@ -313,7 +316,7 @@ def __init__(self): pass def __call__(self, inputs): - return pd.flatten(x=inputs, start_axis=1,stop_axis=-1) + return pd.flatten(x=inputs, start_axis=1, stop_axis=-1) class Reshape(object): @@ -504,7 +507,9 @@ def reduce_min(input_tensor, axis=None): """ raise NotImplementedError + class Pad(object): + def __init__(self, paddings, mode="REFLECT"): if mode not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']: raise Exception("Unsupported mode: {}".format(mode)) @@ -516,6 +521,7 @@ def __init__(self, paddings, mode="REFLECT"): def __call__(self, x): raise NotImplementedError + def pad(tensor, paddings, mode='CONSTANT', constant_values=0): """ Pads a tensor. 
@@ -577,6 +583,7 @@ def stack(values, axis=0): class Meshgrid(object): + def __init__(self, indexing='xy'): super(Meshgrid, self).__init__() self.index = indexing @@ -886,7 +893,6 @@ def __call__(self, *args, **kwargs): pass - class Resize: def __init__(self, scale, method, antialias=False, data_format='channels_last', ksize=None): @@ -943,19 +949,25 @@ def __init__(self): def __call__(self, x): raise NotImplementedError + def ceil(x): raise NotImplementedError + def multiply(x, y): raise NotImplementedError + def divide(x, y): raise NotImplementedError + def identity(x): raise NotImplementedError + class BatchToSpace(object): + def __init__(self, block_size, crops): super(BatchToSpace, self).__init__() pass @@ -965,8 +977,9 @@ def __call__(self, input_x): class DepthToSpace(object): + def __init__(self, block_size, data_format='NHWC'): pass def __call__(self, input): - raise NotImplementedError \ No newline at end of file + raise NotImplementedError diff --git a/tensorlayer/backend/ops/paddle_nn.py b/tensorlayer/backend/ops/paddle_nn.py index 47d9dd062..535b9faaa 100644 --- a/tensorlayer/backend/ops/paddle_nn.py +++ b/tensorlayer/backend/ops/paddle_nn.py @@ -4,6 +4,7 @@ import paddle as pd import paddle.nn.functional as F + def padding_format(padding): """ Checks that the padding format correspond format. @@ -764,6 +765,7 @@ def depthwise_conv2d(input, filter, strides, padding, data_format=None, dilation pass + class Conv1d_transpose(object): def __init__( @@ -923,4 +925,106 @@ def __init__(self): pass def __call__(self, *args, **kwargs): + raise NotImplementedError + + +class GroupConv2D(object): + + def __init__(self, strides, padding, data_format, dilations, out_channel, k_size, groups): + pass + + def __call__(self, input, filters): + raise NotImplementedError + + +class SeparableConv1D(object): + + def __init__(self, stride, padding, data_format, dilations, out_channel, k_size, in_channel, depth_multiplier): + pass + + def __call__(self, inputs, depthwise_filters, pointwise_filters): + raise NotImplementedError + + +class SeparableConv2D(object): + + def __init__(self, strides, padding, data_format, dilations, out_channel, k_size, in_channel, depth_multiplier): + pass + + def __call__(self, inputs, depthwise_filters, pointwise_filters): + raise NotImplementedError + + +class AdaptiveMeanPool1D(object): + + def __init__(self, output_size, data_format): + pass + + def __call__(self, input): + + raise NotImplementedError + + +class AdaptiveMeanPool2D(object): + + def __init__(self, output_size, data_format): + pass + + def __call__(self, inputs): + + raise NotImplementedError + + +class AdaptiveMeanPool3D(object): + + def __init__(self, output_size, data_format): + pass + + def __call__(self, inputs): + raise NotImplementedError + + +class AdaptiveMaxPool1D(object): + + def __init__(self, output_size, data_format): + pass + + def __call__(self, input): + + raise NotImplementedError + + +class AdaptiveMaxPool2D(object): + + def __init__(self, output_size, data_format): pass + + def __call__(self, inputs): + raise NotImplementedError + + +class AdaptiveMaxPool3D(object): + + def __init__(self, output_size, data_format): + pass + + def __call__(self, inputs): + raise NotImplementedError + + +class BinaryConv2D(object): + + def __init__(self, strides, padding, data_format, dilations, out_channel, k_size, in_channel): + pass + + def __call__(self, inputs, filters): + raise NotImplementedError + + +class DorefaConv2D(object): + + def __init__(self, bitW, bitA, strides, padding, data_format, 
dilations, out_channel, k_size, in_channel): + pass + + def __call__(self, inputs, filters): + raise NotImplementedError diff --git a/tensorlayer/backend/ops/tensorflow_backend.py b/tensorlayer/backend/ops/tensorflow_backend.py index 74df53def..9d9a00fdd 100644 --- a/tensorlayer/backend/ops/tensorflow_backend.py +++ b/tensorlayer/backend/ops/tensorflow_backend.py @@ -291,6 +291,7 @@ def dtypes(dt): class Maximum(object): + def __init__(self): pass @@ -299,6 +300,7 @@ def __call__(self, x, y): class Minimum(object): + def __init__(self): pass @@ -524,6 +526,7 @@ def reduce_min(input_tensor, axis=None): class Pad(object): + def __init__(self, paddings, mode="REFLECT"): if mode not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']: raise Exception("Unsupported mode: {}".format(mode)) @@ -534,6 +537,7 @@ def __call__(self, x): outputs = tf.pad(x, self.paddings, mode=self.mode, constant_values=0) return outputs + def pad(tensor, paddings, mode='CONSTANT', constant_values=0): """ Pads a tensor. @@ -600,6 +604,7 @@ def stack(values, axis=0): class Meshgrid(object): + def __init__(self, indexing='xy'): super(Meshgrid, self).__init__() self.index = indexing @@ -931,7 +936,6 @@ def __call__(self, input, axis=None): return tf.math.count_nonzero(input, axis=axis, keepdims=self.keepdims, dtype=self.dtype) - class Resize: def __init__(self, scale, method, antialias=False, data_format='channels_last', ksize=None): @@ -992,19 +996,25 @@ def __init__(self): def __call__(self, x): return tf.sign(x) + def ceil(x): return tf.math.ceil(x) + def multiply(x, y): return tf.multiply(x, y) + def divide(x, y): return tf.divide(x, y) + def identity(x): return tf.identity(x) + class BatchToSpace(object): + def __init__(self, block_size, crops): self.bolock_size = block_size self.crops = crops @@ -1012,7 +1022,9 @@ def __init__(self, block_size, crops): def __call__(self, input_x): return tf.batch_to_space(input=input_x, block_shape=self.bolock_size, crops=self.crops) + class DepthToSpace(object): + def __init__(self, block_size, data_format='NHWC'): self.block_size = block_size self.data_format = data_format diff --git a/tensorlayer/backend/ops/tensorflow_nn.py b/tensorlayer/backend/ops/tensorflow_nn.py index 71d978f5d..5cefda342 100644 --- a/tensorlayer/backend/ops/tensorflow_nn.py +++ b/tensorlayer/backend/ops/tensorflow_nn.py @@ -5,7 +5,7 @@ from tensorflow.python.framework import ops from tensorflow.python.ops import math_ops from tensorflow.python.training import moving_averages - +from math import floor, ceil # loss function sparse_softmax_cross_entropy_with_logits = tf.nn.sparse_softmax_cross_entropy_with_logits sigmoid_cross_entropy_with_logits = tf.nn.sigmoid_cross_entropy_with_logits @@ -1517,3 +1517,330 @@ def __call__(self, inputs): ) return outputs + + +class GroupConv2D(object): + + def __init__(self, strides, padding, data_format, dilations, out_channel, k_size, groups): + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + self.strides = strides + self.dilations = dilations + self.groups = groups + if self.data_format == 'NHWC': + self.channels_axis = 3 + else: + self.channels_axis = 1 + + def __call__(self, input, filters): + + if self.groups == 1: + outputs = tf.nn.conv2d( + input=input, + filters=filters, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + dilations=self.dilations, + ) + else: + inputgroups = tf.split(input, num_or_size_splits=self.groups, axis=self.channels_axis) + weightsgroups = tf.split(filters, num_or_size_splits=self.groups, 
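The grouped convolution above is just split, per-group convolution, then concatenation along the channel axis. A standalone sketch under assumed NHWC shapes (two groups, so each filter block maps 4/2 = 2 input channels to 3 of the 6 output channels):

```python
import tensorflow as tf

x = tf.random.normal([1, 8, 8, 4])   # NHWC input, 4 channels
w = tf.random.normal([3, 3, 2, 6])   # HWIO filters: in_channels/groups = 2, out = 6

xs = tf.split(x, num_or_size_splits=2, axis=3)   # split input channels
ws = tf.split(w, num_or_size_splits=2, axis=3)   # split output channels
ys = [tf.nn.conv2d(i, k, strides=1, padding='SAME') for i, k in zip(xs, ws)]
y = tf.concat(ys, axis=3)
print(y.shape)  # (1, 8, 8, 6)
```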
axis=self.channels_axis) + convgroups = [] + for i, k in zip(inputgroups, weightsgroups): + convgroups.append( + tf.nn.conv2d( + input=i, + filters=k, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + dilations=self.dilations, + ) + ) + outputs = tf.concat(axis=self.channels_axis, values=convgroups) + + return outputs + + +class SeparableConv1D(object): + + def __init__(self, stride, padding, data_format, dilations, out_channel, k_size, in_channel, depth_multiplier): + self.data_format, self.padding = preprocess_1d_format(data_format, padding) + + if self.data_format == 'NWC': + self.spatial_start_dim = 1 + self.strides = (1, stride, stride, 1) + self.data_format = 'NHWC' + else: + self.spatial_start_dim = 2 + self.strides = (1, 1, stride, stride) + self.data_format = 'NCHW' + self.dilation_rate = (1, dilations) + + def __call__(self, inputs, depthwise_filters, pointwise_filters): + inputs = tf.expand_dims(inputs, axis=self.spatial_start_dim) + depthwise_filters = tf.expand_dims(depthwise_filters, 0) + pointwise_filters = tf.expand_dims(pointwise_filters, 0) + + outputs = tf.nn.separable_conv2d( + inputs, depthwise_filters, pointwise_filters, strides=self.strides, padding=self.padding, + dilations=self.dilation_rate, data_format=self.data_format + ) + + outputs = tf.squeeze(outputs, axis=self.spatial_start_dim) + + return outputs + + +class SeparableConv2D(object): + + def __init__(self, strides, padding, data_format, dilations, out_channel, k_size, in_channel, depth_multiplier): + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + self.strides = strides + self.dilations = (dilations[2], dilations[2]) + + def __call__(self, inputs, depthwise_filters, pointwise_filters): + + outputs = tf.nn.separable_conv2d( + inputs, depthwise_filters, pointwise_filters, strides=self.strides, padding=self.padding, + dilations=self.dilations, data_format=self.data_format + ) + + return outputs + + +class AdaptiveMeanPool1D(object): + + def __init__(self, output_size, data_format): + self.data_format, _ = preprocess_1d_format(data_format, None) + self.output_size = output_size + + def __call__(self, input): + + if self.data_format == 'NWC': + n, w, c = input.shape + else: + n, c, w = input.shape + + stride = floor(w / self.output_size) + kernel = w - (self.output_size - 1) * stride + output = tf.nn.avg_pool1d(input, ksize=kernel, strides=stride, data_format=self.data_format, padding='VALID') + + return output + + +class AdaptiveMeanPool2D(object): + + def __init__(self, output_size, data_format): + self.data_format, _ = preprocess_2d_format(data_format, None) + self.output_size = output_size + + def __call__(self, inputs): + + if self.data_format == 'NHWC': + n, h, w, c = inputs.shape + else: + n, c, h, w = inputs.shape + + out_h, out_w = self.output_size + stride_h = floor(h / out_h) + kernel_h = h - (out_h - 1) * stride_h + stride_w = floor(w / out_w) + kernel_w = w - (out_w - 1) * stride_w + + outputs = tf.nn.avg_pool2d( + inputs, ksize=(kernel_h, kernel_w), strides=(stride_h, stride_w), data_format=self.data_format, + padding='VALID' + ) + + return outputs + + +class AdaptiveMeanPool3D(object): + + def __init__(self, output_size, data_format): + self.data_format, _ = preprocess_3d_format(data_format, None) + self.output_size = output_size + + def __call__(self, inputs): + + if self.data_format == 'NDHWC': + n, d, h, w, c = inputs.shape + else: + n, c, d, h, w = inputs.shape + + out_d, out_h, out_w = self.output_size + stride_d = floor(d / out_d) 
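The adaptive pools above turn a requested output size into one fixed kernel and stride: stride = floor(in / out) and kernel = in - (out - 1) * stride. A quick check of that rule (note this differs from frameworks that size each window independently):

```python
from math import floor

def adaptive_params(in_size, out_size):
    stride = floor(in_size / out_size)
    kernel = in_size - (out_size - 1) * stride
    return kernel, stride

print(adaptive_params(7, 3))  # (3, 2): windows [0:3], [2:5], [4:7] cover all 7 positions
print(adaptive_params(8, 4))  # (2, 2): reduces to ordinary non-overlapping pooling
```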
+ kernel_d = d - (out_d - 1) * stride_d + stride_h = floor(h / out_h) + kernel_h = h - (out_h - 1) * stride_h + stride_w = floor(w / out_w) + kernel_w = w - (out_w - 1) * stride_w + + outputs = tf.nn.avg_pool3d( + inputs, ksize=(kernel_d, kernel_h, kernel_w), strides=(stride_d, stride_h, stride_w), + data_format=self.data_format, padding='VALID' + ) + + return outputs + + +class AdaptiveMaxPool1D(object): + + def __init__(self, output_size, data_format): + self.data_format, _ = preprocess_1d_format(data_format, None) + self.output_size = output_size + + def __call__(self, input): + + if self.data_format == 'NWC': + n, w, c = input.shape + else: + n, c, w = input.shape + + stride = floor(w / self.output_size) + kernel = w - (self.output_size - 1) * stride + output = tf.nn.max_pool1d(input, ksize=kernel, strides=stride, data_format=self.data_format, padding='VALID') + + return output + + +class AdaptiveMaxPool2D(object): + + def __init__(self, output_size, data_format): + self.data_format, _ = preprocess_2d_format(data_format, None) + self.output_size = output_size + + def __call__(self, inputs): + + if self.data_format == 'NHWC': + n, h, w, c = inputs.shape + else: + n, c, h, w = inputs.shape + + out_h, out_w = self.output_size + stride_h = floor(h / out_h) + kernel_h = h - (out_h - 1) * stride_h + stride_w = floor(w / out_w) + kernel_w = w - (out_w - 1) * stride_w + + outputs = tf.nn.max_pool2d( + inputs, ksize=(kernel_h, kernel_w), strides=(stride_h, stride_w), data_format=self.data_format, + padding='VALID' + ) + + return outputs + + +class AdaptiveMaxPool3D(object): + + def __init__(self, output_size, data_format): + self.data_format, _ = preprocess_3d_format(data_format, None) + self.output_size = output_size + + def __call__(self, inputs): + + if self.data_format == 'NDHWC': + n, d, h, w, c = inputs.shape + else: + n, c, d, h, w = inputs.shape + + out_d, out_h, out_w = self.output_size + stride_d = floor(d / out_d) + kernel_d = d - (out_d - 1) * stride_d + stride_h = floor(h / out_h) + kernel_h = h - (out_h - 1) * stride_h + stride_w = floor(w / out_w) + kernel_w = w - (out_w - 1) * stride_w + + outputs = tf.nn.max_pool3d( + inputs, ksize=(kernel_d, kernel_h, kernel_w), strides=(stride_d, stride_h, stride_w), + data_format=self.data_format, padding='VALID' + ) + + return outputs + + +class BinaryConv2D(object): + + def __init__(self, strides, padding, data_format, dilations, out_channel, k_size, in_channel): + self.data_format, self.padding = preprocess_2d_format(data_format, padding) + self.strides = strides + self.dilations = dilations + + # @tf.RegisterGradient("TL_Sign_QuantizeGrad") + # def _quantize_grad(op, grad): + # """Clip and binarize tensor using the straight through estimator (STE) for the gradient.""" + # return tf.clip_by_value(grad, -1, 1) + + def quantize(self, x): + # ref: https://github.com/AngusG/tensorflow-xnor-bnn/blob/master/models/binary_net.py#L70 + # https://github.com/itayhubara/BinaryNet.tf/blob/master/nnUtils.py + with tf.compat.v1.get_default_graph().gradient_override_map({"Sign": "TL_Sign_QuantizeGrad"}): + return tf.sign(x) + + def __call__(self, inputs, filters): + + filters = self.quantize(filters) + + outputs = tf.nn.conv2d( + input=inputs, filters=filters, strides=self.strides, padding=self.padding, data_format=self.data_format, + dilations=self.dilations + ) + + return outputs + + +class DorefaConv2D(object): + + def __init__(self, bitW, bitA, strides, padding, data_format, dilations, out_channel, k_size, in_channel): + self.data_format, 
self.padding = preprocess_2d_format(data_format, padding) + self.strides = strides + self.dilations = dilations + self.bitW = bitW + self.bitA = bitA + + def _quantize_dorefa(self, x, k): + G = tf.compat.v1.get_default_graph() + n = float(2**k - 1) + with G.gradient_override_map({"Round": "Identity"}): + return tf.round(x * n) / n + + def cabs(self, x): + return tf.minimum(1.0, tf.abs(x), name='cabs') + + def quantize_active(self, x, bitA): + if bitA == 32: + return x + return self._quantize_dorefa(x, bitA) + + def quantize_weight(self, x, bitW, force_quantization=False): + + G = tf.compat.v1.get_default_graph() + if bitW == 32 and not force_quantization: + return x + if bitW == 1: # BWN + with G.gradient_override_map({"Sign": "Identity"}): + E = tf.stop_gradient(tf.reduce_mean(input_tensor=tf.abs(x))) + return tf.sign(x / E) * E + x = tf.clip_by_value( + x * 0.5 + 0.5, 0.0, 1.0 + ) # it seems as though most weights are within -1 to 1 region anyways + return 2 * self._quantize_dorefa(x, bitW) - 1 + + def __call__(self, inputs, filters): + + inputs = self.quantize_active(self.cabs(inputs), self.bitA) + + filters = self.quantize_weight(filters, self.bitW) + + outputs = tf.nn.conv2d( + input=inputs, + filters=filters, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + dilations=self.dilations, + ) + + return outputs diff --git a/tensorlayer/dataflow/__init__.py b/tensorlayer/dataflow/__init__.py index d26b3226e..912a2384a 100644 --- a/tensorlayer/dataflow/__init__.py +++ b/tensorlayer/dataflow/__init__.py @@ -3,21 +3,19 @@ from __future__ import absolute_import, division, print_function from tensorlayer.backend.ops.load_backend import BACKEND +from tensorlayer.dataflow import image if BACKEND == 'tensorflow': from .tensorflow_data import * - from .tensorflow_image import * elif BACKEND == 'mindspore': from .mindspore_data import * - from .mindspore_image import * - -elif BACKEND == 'dragon': - pass elif BACKEND == 'paddle': + from .paddle_data import * + +elif BACKEND == 'dragon': pass else: raise NotImplementedError("This backend is not supported") - diff --git a/tensorlayer/dataflow/image/__init__.py b/tensorlayer/dataflow/image/__init__.py index df05229a7..c0568ed47 100644 --- a/tensorlayer/dataflow/image/__init__.py +++ b/tensorlayer/dataflow/image/__init__.py @@ -1,2 +1,17 @@ #! 
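The Round-to-Identity override above depends on TF1 graph-mode gradient_override_map. For reference, the same straight-through estimator can be written in eager TF2 with tf.custom_gradient; this is an alternative sketch, not the code path the patch uses:

```python
import tensorflow as tf

@tf.custom_gradient
def round_ste(x):
    def grad(dy):
        return dy                  # pass gradients straight through the rounding
    return tf.round(x), grad

x = tf.Variable([0.2, 0.7])
with tf.GradientTape() as tape:
    y = tf.reduce_sum(round_ste(x) * 3.0)
print(tape.gradient(y, x))         # [3. 3.] although round() itself is flat a.e.
```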
/usr/bin/python
 # -*- coding: utf-8 -*-
+from __future__ import absolute_import, division, print_function
+
+from tensorlayer.backend.ops.load_backend import BACKEND
+
+if BACKEND == 'tensorflow':
+    from .tensorflow_image import *
+elif BACKEND == 'mindspore':
+    from .mindspore_image import *
+elif BACKEND == 'paddle':
+    from .paddle_image import *
+elif BACKEND == 'pytorch':
+    pass
+
+else:
+    raise NotImplementedError("This backend is not supported")
diff --git a/tensorlayer/dataflow/image/mindspore_image.py b/tensorlayer/dataflow/image/mindspore_image.py
new file mode 100644
index 000000000..9f10c7d1a
--- /dev/null
+++ b/tensorlayer/dataflow/image/mindspore_image.py
@@ -0,0 +1,1539 @@
+import numpy as np
+from PIL import Image, ImageOps, ImageEnhance, __version__
+import random
+import colorsys
+import numbers
+import math
+import io
+__all__ = [
+    'CentralCrop',
+    'HsvToRgb',
+    'AdjustBrightness',
+    'AdjustContrast',
+    'AdjustHue',
+    'AdjustSaturation',
+    'Crop',
+    'FlipHorizontal',
+    'FlipVertical',
+    'GrayToRgb',
+    'Standardization',
+    'RgbToGray',
+    'PadToBoundingbox',
+    'Pad',
+    'RandomBrightness',
+    'RandomContrast',
+    'RandomHue',
+    'RandomSaturation',
+    'RandomCrop',
+    'Resize',
+    'CropAndResize',
+    'CropOrPad',
+    'ResizeAndPad',
+    'RgbToHsv',
+    'Transpose',
+    'RandomRotation',
+    'RandomShift',
+    'RandomShear',
+    'RandomZoom',
+    'Rescale',
+    'RandomFlipVertical',
+    'RandomFlipHorizontal',
+    'HWC2CHW',
+    'CHW2HWC',
+]
+
+augment_error_message = 'img should be PIL image. Got {}.'
+
+
+def ToTensor(image):
+
+    image = np.asarray(image).astype(np.float32)
+    return image
+
+
+def ToPIL(image):
+    """
+    Convert the input image to PIL format.
+
+    Args:
+        image: Image to be converted.
+
+    Returns:
+        img (PIL image), Converted image.
+    """
+    return Image.fromarray(np.array(image).astype(np.uint8))
+
+
+def Decode(image):
+    """
+    Decode the input image to PIL image format in RGB mode.
+
+    Args:
+        image: Image to be decoded.
+
+    Returns:
+        img (PIL image), Decoded image in RGB mode.
+    """
+
+    try:
+        data = io.BytesIO(image)
+        img = Image.open(data)
+        return img.convert('RGB')
+    except IOError as e:
+        raise ValueError("{0}\nWARNING: Failed to decode given image.".format(e))
+    except AttributeError as e:
+        raise ValueError("{0}\nWARNING: Failed to decode, Image might already be decoded.".format(e))
+
+
+def Crop(image, offset_height, offset_width, target_height, target_width, is_hwc=True):
+    '''
+
+    Parameters
+    ----------
+    image:
+        An image or a batch of images.
+    offset_height:
+        Vertical coordinate of the top-left corner of the result in the input.
+    offset_width:
+        Horizontal coordinate of the top-left corner of the result in the input.
+    target_height:
+        Height of the result.
+    target_width:
+        Width of the result.
+    is_hwc:
+        If is_hwc is True, the order of image channels is [B, H, W, C] or [H, W, C].
+        If is_hwc is False, the order of image channels is [B, C, H, W] or [C, H, W].
+    Returns:
+        Output [batch, target_height, target_width, channels] or [target_height, target_width, channels]
+    -------
+
+    '''
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, Image.Image):
+        image = ToTensor(image)
+    if not isinstance(image, np.ndarray):
+        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
+    shape_size = len(image.shape)
+
+    if not shape_size in (3, 4):
+        raise TypeError(
+            'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). 
\
+            Got {}'.format(image.shape)
+        )
+    if shape_size == 3:
+        if is_hwc:
+            height, width, channels = image.shape
+        else:
+            channels, height, width = image.shape
+    else:
+        if is_hwc:
+            batch, height, width, channels = image.shape
+        else:
+            batch, channels, height, width = image.shape
+
+    if offset_width < 0:
+        raise ValueError('offset_width must be >= 0.')
+    if offset_height < 0:
+        raise ValueError('offset_height must be >= 0.')
+    if target_height <= 0:
+        raise ValueError('target_height must be > 0.')
+    if target_width <= 0:
+        raise ValueError('target_width must be > 0.')
+    if offset_width + target_width > width:
+        raise ValueError('offset_width + target_width must be <= image width.')
+    if offset_height + target_height > height:
+        raise ValueError('offset_height + target_height must be <= image height.')
+
+    if shape_size == 3:
+        if is_hwc:
+            return ToTensor(
+                image[offset_height:offset_height + target_height, offset_width:offset_width + target_width, :]
+            )
+        else:
+            return ToTensor(
+                image[:, offset_height:offset_height + target_height, offset_width:offset_width + target_width]
+            )
+    else:
+        if is_hwc:
+            return ToTensor(
+                image[:, offset_height:offset_height + target_height, offset_width:offset_width + target_width, :]
+            )
+        else:
+            return ToTensor(
+                image[:, :, offset_height:offset_height + target_height, offset_width:offset_width + target_width]
+            )
+
+
+def CentralCrop(image, central_fraction=None, size=None, is_hwc=True):
+    '''
+
+    Parameters
+    ----------
+    image:
+        Input either a 3-D float Tensor of shape [height, width, depth] or a 4-D Tensor of shape [batch, height, width, depth].
+    central_fraction:
+        float in (0, 1], fraction of size to crop.
+    size:
+        size (Union[int, sequence]) – The output size of the cropped image. If size is an integer, a square crop of size (size, size) is returned.
+        If size is a sequence of length 2, it should be (height, width).
+    Returns:
+        3-D float Tensor or 4-D float Tensor, as per the input.
+    -------
+    If the backend is tensorflow, central_fraction is used preferentially; if size is used, the height-width ratio of the crop matches the original ratio.
+    If the backend is mindspore, size is used preferentially.
+    '''
+    if size is None and central_fraction is None:
+        raise ValueError('central_fraction and size cannot both be None')
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, Image.Image):
+        image = ToTensor(image)
+    if not isinstance(image, np.ndarray):
+        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
+    shape_size = len(image.shape)
+    if not shape_size in (3, 4):
+        raise TypeError(
+            'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). 
\ + Got {}'.format(image.shape) + ) + + if shape_size == 3: + if is_hwc: + height, width, channels = image.shape + else: + channels, height, width = image.shape + else: + if is_hwc: + batch, height, width, channels = image.shape + else: + batch, channels, height, width = image.shape + if size is None: + if central_fraction > 1 or central_fraction <= 0: + raise ValueError('central_fraction must be in (0,1].') + target_height = int(round(height * central_fraction)) + target_width = int(round(width * central_fraction)) + size = (target_height, target_width) + if isinstance(size, int): + size = (size, size) + crop_height, crop_width = size + crop_top = int(round((height - crop_height) / 2.)) + crop_left = int(round((width - crop_width) / 2.)) + + return Crop(image, crop_top, crop_left, crop_height, crop_width, is_hwc) + + +def hsv_to_rgb(np_hsv_img, is_hwc): + """ + Convert HSV img to RGB img. + + Args: + np_hsv_img (numpy.ndarray): NumPy HSV image array of shape (H, W, C) or (C, H, W) to be converted. + is_hwc (Bool): If True, the shape of np_hsv_img is (H, W, C), otherwise must be (C, H, W). + + Returns: + np_rgb_img (numpy.ndarray), NumPy HSV image with same shape of np_hsv_img. + """ + if is_hwc: + h, s, v = np_hsv_img[:, :, 0], np_hsv_img[:, :, 1], np_hsv_img[:, :, 2] + else: + h, s, v = np_hsv_img[0, :, :], np_hsv_img[1, :, :], np_hsv_img[2, :, :] + to_rgb = np.vectorize(colorsys.hsv_to_rgb) + r, g, b = to_rgb(h, s, v) + + if is_hwc: + axis = 2 + else: + axis = 0 + np_rgb_img = np.stack((r, g, b), axis=axis) + return np_rgb_img + + +def HsvToRgb(image, is_hwc=True): + + if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): + image = Decode(image) + if isinstance(image, Image.Image): + image = ToTensor(image) + if not isinstance(image, np.ndarray): + raise TypeError('img should be NumPy image. Got {}'.format(type(image))) + shape_size = len(image.shape) + + if not shape_size in (3, 4): + raise TypeError( + 'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \ + Got {}'.format(image.shape) + ) + if shape_size == 3: + batch_size = 0 + if is_hwc: + num_channels = image.shape[2] + else: + num_channels = image.shape[0] + else: + batch_size = image.shape[0] + if is_hwc: + num_channels = image.shape[3] + else: + num_channels = image.shape[1] + + if num_channels != 3: + raise TypeError('img should be 3 channels RGB img. Got {} channels'.format(num_channels)) + if batch_size == 0: + return hsv_to_rgb(image, is_hwc) + return ToTensor([hsv_to_rgb(img, is_hwc) for img in image]) + + +def AdjustBrightness(image, factor): + ''' + + Parameters + ---------- + image: + input NumPy image array or PIL image + factor: + factor should be in the range (-1,1) + Returns: + ------- + np darray image + ''' + if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): + image = Decode(image) + if isinstance(image, Image.Image): + image = ToTensor(image) + if not isinstance(image, np.ndarray): + raise TypeError('img should be NumPy image. 
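The colorsys-based converters above operate elementwise via np.vectorize; a quick roundtrip check, assuming float RGB values in [0, 1] and HWC layout:

```python
import colorsys
import numpy as np

rgb = np.random.rand(4, 4, 3)
to_hsv = np.vectorize(colorsys.rgb_to_hsv)
to_rgb = np.vectorize(colorsys.hsv_to_rgb)

h, s, v = to_hsv(rgb[..., 0], rgb[..., 1], rgb[..., 2])
back = np.stack(to_rgb(h, s, v), axis=2)
print(np.allclose(rgb, back))  # True up to floating-point rounding
```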
Got {}'.format(type(image))) + if factor >= 1 or factor <= -1: + raise ValueError('factor must be in (-1,1).') + image = image + factor * 255 + image = np.clip(image, 0, 255) + + return ToTensor(image) + + +def AdjustContrast(image, factor): + + if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): + image = Decode(image) + if isinstance(image, np.ndarray): + image = ToPIL(image) + if not isinstance(image, Image.Image): + raise TypeError(augment_error_message.format(type(image))) + + image = ImageEnhance.Contrast(image).enhance(factor) + + return ToTensor(image) + + +def AdjustHue(image, factor): + + if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): + image = Decode(image) + if isinstance(image, np.ndarray): + image = ToPIL(image) + if not isinstance(image, Image.Image): + raise TypeError(augment_error_message.format(type(image))) + + image_hue_factor = factor + if not -1 <= image_hue_factor <= 1: + raise ValueError('image_hue_factor {} is not in [-1, 1].'.format(image_hue_factor)) + + mode = image.mode + if mode in {'L', '1', 'I', 'F'}: + return image + + hue, saturation, value = image.convert('HSV').split() + + np_hue = np.array(hue, dtype=np.uint8) + + with np.errstate(over='ignore'): + np_hue += np.uint8(image_hue_factor * 255) + hue = Image.fromarray(np_hue, 'L') + + image = Image.merge('HSV', (hue, saturation, value)).convert(mode) + + return ToTensor(image) + + +def AdjustSaturation(image, factor): + + if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): + image = Decode(image) + if isinstance(image, np.ndarray): + image = ToPIL(image) + if not isinstance(image, Image.Image): + raise TypeError(augment_error_message.format(type(image))) + + enhancer = ImageEnhance.Color(image) + image = enhancer.enhance(factor) + + return ToTensor(image) + + +def FlipHorizontal(image): + + if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): + image = Decode(image) + if isinstance(image, Image.Image): + image = ToTensor(image) + if not isinstance(image, np.ndarray): + raise TypeError('img should be NumPy image. Got {}'.format(type(image))) + + image = np.fliplr(image) + + return image + + +def FlipVertical(image): + + if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): + image = Decode(image) + if isinstance(image, Image.Image): + image = ToTensor(image) + if not isinstance(image, np.ndarray): + raise TypeError('img should be NumPy image. Got {}'.format(type(image))) + image = np.flipud(image) + + return image + + +def GrayToRgb(image): + + if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): + image = Decode(image) + if isinstance(image, Image.Image): + image = ToTensor(image) + if not isinstance(image, np.ndarray): + raise TypeError('img should be NumPy image. 
Got {}'.format(type(image)))
+    shape = image.shape
+    output_image = np.zeros((shape[0], shape[1], 3), dtype=np.uint8)
+    if len(shape) == 3:
+        for i in range(3):
+            # replicate the single gray channel (index 0) into R, G and B
+            output_image[:, :, i] = image[:, :, 0]
+    elif len(shape) == 2:
+        for i in range(3):
+            output_image[:, :, i] = image
+
+    return ToTensor(output_image)
+
+
+def RgbToGray(image):
+
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, np.ndarray):
+        image = ToPIL(image)
+    if not isinstance(image, Image.Image):
+        raise TypeError(augment_error_message.format(type(image)))
+    '''
+    When converting a color image to grayscale (mode 'L'), PIL uses the ITU-R 601-2 luma transform:
+    L = R * 299/1000 + G * 587/1000 + B * 114/1000
+    '''
+    image = image.convert('L')
+    return ToTensor(image)
+
+
+def PadToBoundingbox(image, offset_height, offset_width, target_height, target_width, padding_value=0, is_hwc=True):
+    '''
+
+    Parameters
+    ----------
+    image:
+        A 3-D numpy ndarray or 4-D numpy ndarray image.
+    offset_height:
+        Number of rows of padding to add on top.
+    offset_width:
+        Number of columns of padding to add on the left.
+    target_height:
+        Height of output image.
+    target_width:
+        Width of output image.
+    padding_value:
+        Value used for the padded pixels.
+    Returns
+        A numpy ndarray image
+    -------
+    '''
+
+    if offset_height < 0:
+        raise ValueError("offset_height must be >= 0")
+    if offset_width < 0:
+        raise ValueError("offset_width must be >= 0")
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, Image.Image):
+        image = ToTensor(image)
+    if not isinstance(image, np.ndarray):
+        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
+    shape_size = len(image.shape)
+    if not shape_size in (3, 4):
+        raise TypeError(
+            'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \
+            Got {}'.format(image.shape)
+        )
+    if shape_size == 3:
+        if is_hwc:
+            height, width, channels = image.shape
+        else:
+            channels, height, width = image.shape
+    else:
+        if is_hwc:
+            batch, height, width, channels = image.shape
+        else:
+            batch, channels, height, width = image.shape
+    top = offset_height
+    bottom = target_height - height - top
+    left = offset_width
+    right = target_width - width - left
+
+    if bottom < 0:
+        raise ValueError("target_height must be >= offset_height + height")
+
+    if right < 0:
+        raise ValueError("target_width must be >= offset_width + width")
+
+    if shape_size == 3:
+        if is_hwc:
+            return ToTensor(
+                np.pad(
+                    image, ((top, bottom), (left, right), (0, 0)), mode='constant',
+                    constant_values=(padding_value, padding_value)
+                )
+            )
+        else:
+            return ToTensor(
+                np.pad(
+                    image, ((0, 0), (top, bottom), (left, right)), mode='constant',
+                    constant_values=(padding_value, padding_value)
+                )
+            )
+    else:
+        if is_hwc:
+            return ToTensor(
+                np.pad(
+                    image, ((0, 0), (top, bottom), (left, right), (0, 0)), mode='constant',
+                    constant_values=(padding_value, padding_value)
+                )
+            )
+        else:
+            return ToTensor(
+                np.pad(
+                    image, ((0, 0), (0, 0), (top, bottom), (left, right)), mode='constant',
+                    constant_values=(padding_value, padding_value)
+                )
+            )
+
+
+def Pad(image, padding, padding_value=0, mode='constant', is_hwc=True):
+    '''
+
+    Parameters
+    ----------
+    image:
+        A 3-D or 4-D Tensor.
+    padding:
+        An integer or a list/tuple. If a single number is provided, pad all borders with this value.
+        If a tuple or list of 2 values is provided, the first value pads the top and bottom and the second value pads the left and right.
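PadToBoundingbox above places the image at (offset_height, offset_width) inside a target_height x target_width canvas filled with padding_value; the arithmetic in miniature:

```python
import numpy as np

img = np.ones((2, 3, 1))                  # H=2, W=3, C=1
top, left = 1, 2
target_h, target_w = 5, 7
pad = ((top, target_h - 2 - top), (left, target_w - 3 - left), (0, 0))
out = np.pad(img, pad, mode='constant', constant_values=0)
print(out.shape)                          # (5, 7, 1)
print(out[1:3, 2:5, 0])                   # the original block of ones
```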
+        If 4 values are provided as a list or tuple, they pad the top, bottom, left and right respectively.
+    padding_value:
+        In 'constant' mode, the scalar pad value to use. Must be same type as tensor.
+    mode:
+        One of 'constant', 'edge', 'reflect', or 'symmetric' (lowercase).
+    Returns:
+        A padded Tensor. Has the same type as tensor.
+    -------
+
+    '''
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, Image.Image):
+        image = ToTensor(image)
+    if not isinstance(image, np.ndarray):
+        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
+    shape_size = image.shape
+    if len(shape_size) == 3:
+        batch_size = 0
+    elif len(shape_size) == 4:
+        batch_size = shape_size[0]
+    else:
+        raise TypeError(
+            'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \
+            Got {}'.format(image.shape)
+        )
+    if mode not in ('constant', 'edge', 'reflect', 'symmetric'):
+        raise TypeError('mode should be one of (constant, edge, reflect, symmetric).')
+
+    if isinstance(padding, int):
+        padding = ((padding, padding), (padding, padding))
+    elif isinstance(padding, list) or isinstance(padding, tuple):
+        if len(padding) == 2:
+            padding = ((padding[0], padding[0]), (padding[1], padding[1]))
+        elif len(padding) == 4:
+            padding = ((padding[0], padding[1]), (padding[2], padding[3]))
+        else:
+            raise ValueError('The length of padding should be 2 or 4, but got {}.'.format(len(padding)))
+    else:
+        raise TypeError('Padding should be an integer or a list/tuple, but got {}.'.format(type(padding)))
+
+    if batch_size == 0:
+        if is_hwc:
+            padding = (padding[0], padding[1], (0, 0))
+        else:
+            padding = ((0, 0), padding[0], padding[1])
+    else:
+        if is_hwc:
+            padding = ((0, 0), padding[0], padding[1], (0, 0))
+        else:
+            padding = ((0, 0), (0, 0), padding[0], padding[1])
+    if mode == 'constant':
+        return ToTensor(np.pad(image, padding, mode=mode, constant_values=(padding_value, padding_value)))
+    else:
+        return ToTensor(np.pad(image, padding, mode=mode))
+
+
+def Standardization(image, mean=None, std=None, channel_mode=False, is_hwc=True):
+    '''
+
+    Parameters
+    ----------
+    image:
+        An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image.
+    mean:
+        List or tuple of mean values for each channel, with respect to channel order.
+    std:
+        List or tuple of standard deviations for each channel.
+    channel_mode:
+        Decide to implement standardization on the whole image or on each channel of the image.
+    Returns:
+        A Tensor with the same shape and dtype as image.
+    -------
+    '''
+
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
+        image = ToTensor(image)
+    if not isinstance(image, np.ndarray):
+        raise TypeError('img should be NumPy image. 
Got {}'.format(type(image)))
+    num_shape = image.shape
+    if is_hwc:
+        height, width, channels = 0, 1, 2
+    else:
+        channels, height, width = 0, 1, 2
+    if mean is not None and std is not None:
+        if len(mean) != len(std):
+            raise ValueError("Length of mean and std must be equal")
+        if len(mean) == 1:
+            mean = [mean[0]] * num_shape[channels]
+            std = [std[0]] * num_shape[channels]
+        mean = np.array(mean, dtype=image.dtype)
+        std = np.array(std, dtype=image.dtype)
+        # broadcast the per-channel statistics along the channel axis
+        if is_hwc:
+            return ToTensor((image - mean) / std)
+        return ToTensor((image - mean[:, None, None]) / std[:, None, None])
+    elif mean is None and std is None:
+        if channel_mode:
+            num_pixels = num_shape[height] * num_shape[width]
+            image_mean = np.mean(image, axis=(height, width))
+            stddev = np.std(image, axis=(height, width))
+            min_stddev = 1 / np.sqrt(num_pixels)
+            min_stddev = [min_stddev] * num_shape[channels]
+            adjusted_stddev = np.maximum(stddev, min_stddev)
+            image -= image_mean
+            image = np.divide(image, adjusted_stddev)
+            return ToTensor(image)
+        else:
+            num_pixels = num_shape[height] * num_shape[width] * num_shape[channels]
+            image_mean = np.mean(image, axis=(0, 1, 2))
+            image_mean = [image_mean] * 3
+            stddev = np.std(image, axis=(0, 1, 2))
+            min_stddev = 1 / np.sqrt(num_pixels)
+            adjusted_stddev = np.maximum(stddev, min_stddev)
+            adjusted_stddev = [adjusted_stddev] * 3
+            image -= image_mean
+            image = np.divide(image, adjusted_stddev)
+            return ToTensor(image)
+    else:
+        raise ValueError('std and mean must both be None or not None')
+
+
+def RandomBrightness(image, factor):
+    '''
+
+    Parameters
+    ----------
+    image:
+        An image or images to adjust.
+    factor:
+        Float in [0, 1]. The brightness delta is drawn uniformly from [-factor, factor).
+    Returns:
+        The brightness-adjusted image(s).
+    -------
+
+    '''
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
+        image = ToTensor(image)
+    if not isinstance(image, np.ndarray):
+        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
+    if factor < 0 or factor > 1:
+        raise ValueError('factor should be in [0,1].')
+    delta = random.uniform(-factor, factor)
+    image = image + delta * 255
+    image = np.clip(image, 0, 255)
+
+    return image
+
+
+def RandomContrast(image, lower, upper, seed=None):
+    '''
+
+    Parameters
+    ----------
+    image:
+        An image tensor with 3 or more dimensions.
+    lower:
+        float. Lower bound for the random contrast factor.
+    upper:
+        float. Upper bound for the random contrast factor.
+    seed:
+        A Python integer. Used to create a random seed.
+
+    Returns:
+        The contrast-adjusted image(s).
+    -------
+    '''
+    if upper <= lower:
+        raise ValueError('upper must be > lower')
+    if lower < 0:
+        raise ValueError('lower must be non-negative')
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, np.ndarray):
+        image = ToPIL(image)
+    if not isinstance(image, Image.Image):
+        raise TypeError(augment_error_message.format(type(image)))
+
+    factor = random.uniform(lower, upper)
+    image = ImageEnhance.Contrast(image).enhance(factor)
+
+    return ToTensor(image)
+
+
+def RandomHue(image, factor, seed=None):
+    '''
+
+    Parameters
+    ----------
+    image:
+        RGB image or images. The size of the last dimension must be 3.
+    factor:
+        float. The maximum value for the random factor.
+    seed:
+        An operation-specific seed.
+
+    Returns:
+        Adjusted numpy ndarray image(s). 
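The mean=std=None branch of Standardization above follows the usual per-image rule: divide by max(stddev, 1/sqrt(num_pixels)) so a near-constant image does not blow up. A compact restatement:

```python
import numpy as np

image = np.random.rand(8, 8, 3).astype(np.float32)
num_pixels = image.size
adjusted_std = max(image.std(), 1.0 / np.sqrt(num_pixels))
standardized = (image - image.mean()) / adjusted_std
print(float(standardized.mean()), float(standardized.std()))  # ~0.0, ~1.0
```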
+ ------- + + ''' + + if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): + image = Decode(image) + if isinstance(image, np.ndarray): + image = ToPIL(image) + if not isinstance(image, Image.Image): + raise TypeError(augment_error_message.format(type(image))) + + if factor > 0.5 or factor < 0: + raise ValueError('factor should be in [0,0.5].') + + image_hue_factor = random.uniform(-factor, factor) + mode = image.mode + if mode in {'L', '1', 'I', 'F'}: + return image + + hue, saturation, value = image.convert('HSV').split() + + np_hue = np.array(hue, dtype=np.uint8) + + with np.errstate(over='ignore'): + np_hue += np.uint8(image_hue_factor * 255) + hue = Image.fromarray(np_hue, 'L') + + image = Image.merge('HSV', (hue, saturation, value)).convert(mode) + + return ToTensor(image) + + +def RandomSaturation(image, lower, upper, seed=None): + ''' + Parameters + ---------- + image: + RGB image or images. The size of the last dimension must be 3. + lower: + float. Lower bound for the random saturation factor. + upper: + float. Upper bound for the random saturation factor. + seed: + An operation-specific seed. + + Returns; + Adjusted numpy ndarray image(s). + ------- + ''' + if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): + image = Decode(image) + if isinstance(image, np.ndarray): + image = ToPIL(image) + if not isinstance(image, Image.Image): + raise TypeError(augment_error_message.format(type(image))) + if upper <= lower: + raise ValueError('upper must be > lower.') + + if lower < 0: + raise ValueError('lower must be non-negative.') + factor = random.uniform(lower, upper) + enhancer = ImageEnhance.Color(image) + image = enhancer.enhance(factor) + + return ToTensor(image) + + +def RandomCrop(image, size, is_hwc=True): + ''' + + Parameters + ---------- + image: + Input an image to crop. + size: + if size is an integer, shape of cropped image will be [size, size, 3]. if length of size is 2. + shape of cropped image will be [height, width, 3]. + Returns: + A cropped image of the same rank as image and shape size. + ------- + ''' + if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): + image = Decode(image) + if isinstance(image, Image.Image) or isinstance(image, np.ndarray): + image = ToTensor(image) + if not isinstance(image, np.ndarray): + raise TypeError('img should be NumPy image. 
Got {}'.format(type(image)))
+    if isinstance(size, int):
+        size = (size, size)
+    elif isinstance(size, (tuple, list)) and len(size) == 2:
+        size = size
+    else:
+        raise ValueError("Size should be a single integer or a list/tuple (h, w) of length 2.")
+
+    def _input_to_factor_(image, size, is_hwc):
+        if len(image.shape) == 3:
+            if is_hwc:
+                height, width, channels = image.shape
+            else:
+                channels, height, width = image.shape
+        else:
+            if is_hwc:
+                batch, height, width, channels = image.shape
+            else:
+                batch, channels, height, width = image.shape
+
+        target_height, target_width = size
+        if target_height > height or target_width > width:
+            raise ValueError("Crop size {} is larger than input image size {}".format(size, (height, width)))
+        if target_height == height and target_width == width:
+            return 0, 0, height, width
+
+        top = random.randint(0, height - target_height)
+        left = random.randint(0, width - target_width)
+        return top, left, target_height, target_width
+
+    top, left, height, width = _input_to_factor_(image, size, is_hwc)
+
+    return Crop(image, top, left, height, width, is_hwc)
+
+
+def Resize(image, size, method='bilinear', preserve_aspect_ratio=False, antialias=False):
+    '''
+
+    Parameters
+    ----------
+    image:
+        Input an image to resize.
+    size:
+        If size is an integer, the shape of the resized image will be [size, size, 3]. If size has length 2,
+        the shape of the resized image will be [height, width, 3].
+    method:
+        An image.ResizeMethod, or string equivalent. Defaults to bilinear.
+    preserve_aspect_ratio:
+        Whether to preserve the aspect ratio.
+    antialias:
+        Whether to use an anti-aliasing filter when downsampling an image.
+    Returns:
+        A resized image.
+    -------
+    '''
+    DE_PY_INTER_MODE = {
+        'nearest': Image.NEAREST,
+        'bilinear': Image.BILINEAR,
+        'cubic': Image.CUBIC,
+        'lanczos': Image.LANCZOS,
+        'bicubic': Image.BICUBIC
+    }
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, np.ndarray):
+        image = ToPIL(image)
+    if not isinstance(image, Image.Image):
+        raise TypeError(augment_error_message.format(type(image)))
+    if isinstance(size, int):
+        size = (size, size)
+    elif isinstance(size, (tuple, list)) and len(size) == 2:
+        target_height, target_width = size
+        size = (target_width, target_height)
+    else:
+        raise ValueError("Size should be a single integer or a list/tuple (h, w) of length 2.")
+    if method not in ('nearest', 'bilinear', 'cubic', 'lanczos', 'bicubic'):
+        raise TypeError("resize method must be one of 'nearest', 'bilinear', 'cubic', 'lanczos', 'bicubic'")
+
+    if preserve_aspect_ratio:
+        width, height = image.size
+        target_width, target_height = size
+        scale_factor_height = float(target_height / height)
+        scale_factor_width = float(target_width / width)
+        scale_factor = np.minimum(scale_factor_height, scale_factor_width)
+        new_target_height = int(scale_factor * height)
+        new_target_width = int(scale_factor * width)
+        size = (new_target_width, new_target_height)
+    interpolation = DE_PY_INTER_MODE[method]
+    image = image.resize(size, interpolation)
+    if antialias:
+        image = image.resize(size, Image.ANTIALIAS)
+
+    return ToTensor(image)
+
+
+def CropAndResize(image, boxes, box_indices, crop_size, method='bilinear', extrapolation_value=0, is_hwc=True):
+    '''
+
+    Parameters
+    ----------
+    image:
+        A 4-D tensor of shape [batch, image_height, image_width, depth]. Both image_height and image_width need to be positive.
+    boxes:
+        A 2-D tensor of shape [num_boxes, 4]. 
+    box_indices:
+        A 1-D tensor of shape [num_boxes] with int32 values in [0, batch).
+        The value of box_indices[i] specifies the image that the i-th box refers to.
+    crop_size:
+        A 1-D tensor of 2 elements, size = [crop_height, crop_width]. All cropped image patches are resized to this size.
+        The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be positive.
+    method:
+        An optional string specifying the sampling method for resizing.
+        It can be either "bilinear" or "nearest" and defaults to "bilinear".
+    extrapolation_value:
+        An optional float. Defaults to 0. Value used for extrapolation, when applicable.
+    Returns:
+        A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth].
+    -------
+
+    '''
+    if method not in ["bilinear", "nearest"]:
+        raise ValueError('method must be bilinear or nearest.')
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
+        image = ToTensor(image)
+    if not isinstance(image, np.ndarray):
+        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
+    boxes = np.asarray(boxes)
+    box_indices = np.asarray(box_indices)
+    image_shape = image.shape
+    if len(image_shape) == 4:
+        batch_size = image_shape[0]
+    elif len(image_shape) == 3:
+        image = np.expand_dims(image, axis=0)
+    else:
+        raise ValueError('Input must be a 3-D or 4-D image Tensor.')
+
+    box_num = boxes.shape[0]  # boxes.shape is [n, 4]; n is the number of boxes.
+    if not is_hwc:  # normalize the channel order to HWC / BHWC to simplify the arithmetic below
+        image = np.transpose(image, (0, 2, 3, 1))
+    batch_size, height, width, channels = image.shape
+    return_image = np.zeros((box_num, crop_size[0], crop_size[1], 3))
+    for i in range(box_num):
+        # flip first where a box is inverted: y1 > y2 needs a vertical flip, x1 > x2 a horizontal flip
+        y1, x1, y2, x2 = boxes[i]
+        cur_image = image[box_indices[i]]
+        if y1 > y2:
+            cur_image = FlipVertical(cur_image)
+            y1, y2 = y2, y1
+        if x1 > x2:
+            cur_image = FlipHorizontal(cur_image)
+            x1, x2 = x2, x1
+        top_padding = 0 if y1 > 0 else int(round(height * (-y1)))
+        left_padding = 0 if x1 > 0 else int(round(width * (-x1)))
+        bottom_padding = 0 if y2 < 1 else int(round(height * (y2 - 1)))
+        right_padding = 0 if x2 < 1 else int(round(width * (x2 - 1)))
+        # pad only when the box extends beyond the image
+        target_height = top_padding + height + bottom_padding
+        target_width = left_padding + width + right_padding
+        if target_height != height or target_width != width:
+            cur_image = PadToBoundingbox(
+                cur_image, offset_height=top_padding, offset_width=left_padding, target_height=target_height,
+                target_width=target_width, padding_value=extrapolation_value, is_hwc=is_hwc
+            )
+        offset_height = 0 if y1 < 0 else int(round(height * y1))
+        offset_width = 0 if x1 < 0 else int(round(width * x1))
+        target_height = int(round(height * (y2 - y1)))
+        target_width = int(round(width * (x2 - x1)))
+        crop_image = Crop(cur_image, offset_height, offset_width, target_height, target_width, is_hwc)
+        resized_image = Resize(crop_image, crop_size, method=method)
+        return_image[i] = resized_image
+    if not is_hwc:
+        return_image = np.transpose(return_image, (0, 3, 1, 2))
+    return ToTensor(return_image)
+
+
+def CropOrPad(image, target_height, target_width, is_hwc=True):
+    '''
+    Resizes an image to a target width and height by either centrally cropping the image or padding it evenly with zeros.
+    Parameters
+    ----------
+    image:
+        3-D Tensor of shape [height, width, channels].
+    target_height:
+        Target height.
+    target_width:
+        Target width. 
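The boxes consumed by CropAndResize above are normalized [y1, x1, y2, x2] coordinates relative to the source image; how they map to pixel offsets and crop sizes:

```python
height, width = 100, 200
y1, x1, y2, x2 = 0.25, 0.10, 0.75, 0.60

offset_h = int(round(height * y1))         # 25 px from the top
offset_w = int(round(width * x1))          # 20 px from the left
crop_h = int(round(height * (y2 - y1)))    # 50 px tall
crop_w = int(round(width * (x2 - x1)))     # 100 px wide, then resized to crop_size
print(offset_h, offset_w, crop_h, crop_w)  # 25 20 50 100
```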
+    Returns:
+        Cropped and/or padded image.
+    -------
+    '''
+
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
+        image = ToTensor(image)
+    if not isinstance(image, np.ndarray):
+        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
+    shape_size = len(image.shape)
+    if not shape_size in (3, 4):
+        raise TypeError(
+            'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \
+            Got {}'.format(image.shape)
+        )
+    if target_height <= 0:
+        raise ValueError('target_height must be > 0.')
+    if target_width <= 0:
+        raise ValueError('target_width must be > 0.')
+    if shape_size == 3:
+        if is_hwc:
+            height, width, channels = image.shape
+        else:
+            channels, height, width = image.shape
+    else:
+        if is_hwc:
+            batch, height, width, channels = image.shape
+        else:
+            batch, channels, height, width = image.shape
+    offset_height = height - target_height
+    offset_width = width - target_width
+    offset_crop_height = max(offset_height // 2, 0)
+    offset_crop_width = max(offset_width // 2, 0)
+    offset_pad_height = max(-offset_height // 2, 0)
+    offset_pad_width = max(-offset_width // 2, 0)
+    cropped = Crop(
+        image, offset_crop_height, offset_crop_width, min(height, target_height), min(width, target_width), is_hwc
+    )
+
+    padded = PadToBoundingbox(cropped, offset_pad_height, offset_pad_width, target_height, target_width, is_hwc=is_hwc)
+
+    return ToTensor(padded)
+
+
+def ResizeAndPad(image, target_height, target_width, method='bilinear', antialias=False, is_hwc=True):
+    '''
+
+    Parameters
+    ----------
+    image:
+        4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels].
+    target_height:
+        Target height.
+    target_width:
+        Target width.
+    is_hwc:
+        The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True).
+    Returns:
+        Resized and padded image. If the input was 4-D, a 4-D float Tensor of shape [batch, new_height, new_width, channels].
+        If the input was 3-D, a 3-D float Tensor of shape [new_height, new_width, channels].
+    -------
+
+    '''
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
+        image = ToTensor(image)
+    if not isinstance(image, np.ndarray):
+        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
+    shape_size = len(image.shape)
+    if not shape_size in (3, 4):
+        raise TypeError(
+            'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). 
\
+            Got {}'.format(image.shape)
+        )
+    if target_height <= 0:
+        raise ValueError('target_height must be > 0.')
+    if target_width <= 0:
+        raise ValueError('target_width must be > 0.')
+    if shape_size == 3:
+        if is_hwc:
+            height, width, channels = image.shape
+        else:
+            channels, height, width = image.shape
+    else:
+        if is_hwc:
+            batch, height, width, channels = image.shape
+        else:
+            batch, channels, height, width = image.shape
+    height = float(height)
+    width = float(width)
+    ratio = max(height / target_height, width / target_width)
+    resized_height = int(round(height / ratio))
+    resized_width = int(round(width / ratio))
+    padding_height = max(0, int(round((target_height - resized_height) / 2)))
+    padding_width = max(0, int(round((target_width - resized_width) / 2)))
+    resized = Resize(
+        image, size=(resized_height, resized_width), method=method, antialias=antialias
+    )  # TODO: Resize currently expects a single image; resizing batched images still needs support
+    padded = PadToBoundingbox(resized, padding_height, padding_width, target_height, target_width, is_hwc=is_hwc)
+    return ToTensor(padded)
+
+
+def rgb_to_hsv(np_rgb_img, is_hwc):
+    """
+    Convert RGB img to HSV img.
+
+    Args:
+        np_rgb_img (numpy.ndarray): NumPy RGB image array of shape (H, W, C) or (C, H, W) to be converted.
+        is_hwc (Bool): If True, the shape of np_rgb_img is (H, W, C), otherwise it must be (C, H, W).
+
+    Returns:
+        np_hsv_img (numpy.ndarray), NumPy HSV image with the same shape as np_rgb_img.
+    """
+    if is_hwc:
+        r, g, b = np_rgb_img[:, :, 0], np_rgb_img[:, :, 1], np_rgb_img[:, :, 2]
+    else:
+        r, g, b = np_rgb_img[0, :, :], np_rgb_img[1, :, :], np_rgb_img[2, :, :]
+    to_hsv = np.vectorize(colorsys.rgb_to_hsv)
+    h, s, v = to_hsv(r, g, b)
+    if is_hwc:
+        axis = 2
+    else:
+        axis = 0
+    np_hsv_img = np.stack((h, s, v), axis=axis)
+    return np_hsv_img
+
+
+def RgbToHsv(image, is_hwc=True):
+
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
+        image = ToTensor(image)
+    if not isinstance(image, np.ndarray):
+        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
+
+    shape_size = len(image.shape)
+
+    if not shape_size in (3, 4):
+        raise TypeError(
+            'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \
+            Got {}'.format(image.shape)
+        )
+
+    if shape_size == 3:
+        batch_size = 0
+        if is_hwc:
+            num_channels = image.shape[2]
+        else:
+            num_channels = image.shape[0]
+    else:
+        batch_size = image.shape[0]
+        if is_hwc:
+            num_channels = image.shape[3]
+        else:
+            num_channels = image.shape[1]
+
+    if num_channels != 3:
+        raise TypeError('img should be 3 channels RGB img. Got {} channels'.format(num_channels))
+    if batch_size == 0:
+        return ToTensor(rgb_to_hsv(image, is_hwc))
+    return ToTensor([rgb_to_hsv(img, is_hwc) for img in image])
+
+
+def Transpose(image, order):
+    """
+    Transpose the input image by the given axis order.
+    """
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
+        image = ToTensor(image)
+    if not isinstance(image, np.ndarray):
+        raise TypeError('img should be NumPy image. 
Got {}'.format(type(image)))
+    if len(image.shape) == 3:
+        if len(order) != 3:
+            raise ValueError('if image is a 3-D tensor, order should be a list/tuple of length 3')
+        return ToTensor(np.transpose(image, order))
+    elif len(image.shape) == 4:
+        if len(order) != 4:
+            raise ValueError('if image is a 4-D tensor, order should be a list/tuple of length 4')
+        return ToTensor(np.transpose(image, order))
+    else:
+        raise ValueError('\'image\' must have either 3 or 4 dimensions.')
+
+
+def RandomRotation(
+    image, degrees, fill_mode='nearest', fill_value=0, center=None, expand=False, is_hwc=True, interpolation_order=1
+):
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, np.ndarray):
+        image = ToPIL(image)
+    if not isinstance(image, Image.Image):
+        raise TypeError(augment_error_message.format(type(image)))
+
+    if isinstance(degrees, numbers.Number):
+        if degrees < 0:
+            raise ValueError("If degrees is a single number, it cannot be negative.")
+        degrees = (-degrees, degrees)
+    elif isinstance(degrees, (list, tuple)):
+        if len(degrees) != 2:
+            raise ValueError("If degrees is a sequence, the length must be 2.")
+    else:
+        raise TypeError("Degrees must be a single non-negative number or a sequence")
+
+    DE_PY_INTER_MODE = {
+        'nearest': Image.NEAREST,
+        'bilinear': Image.BILINEAR,
+        'antialias': Image.ANTIALIAS,
+        'bicubic': Image.BICUBIC
+    }
+    if fill_mode not in ('nearest', 'bilinear', 'antialias', 'bicubic'):
+        raise TypeError("fill_mode must be one of 'nearest', 'bilinear', 'antialias', 'bicubic'")
+
+    if isinstance(fill_value, int):
+        fill_value = tuple([fill_value] * 3)
+
+    angle = random.uniform(degrees[0], degrees[1])
+    fill_mode = DE_PY_INTER_MODE[fill_mode]
+    return ToTensor(image.rotate(angle, fill_mode, expand, center, fillcolor=fill_value))
+
+
+def RandomShift(image, shift, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1):
+    '''
+
+    Parameters
+    ----------
+    image:
+        Input tensor. Must be 3D.
+    shift:
+        int or list/tuple. If shift is a single number, the width shift range equals the height shift range.
+        If shift is a list/tuple, the shift range is [width fraction, height fraction].
+    is_hwc:
+        The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True).
+    fill_mode:
+        Points outside the boundaries of the input are filled according to the given mode (one of {'nearest', 'bilinear', 'bicubic'}).
+    fill_value:
+        Value used for points outside the boundaries of the input.
+    interpolation_order:
+        int, order of spline interpolation. See ndimage.interpolation.affine_transform.
+    Returns
+        Shifted Numpy image tensor. 
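PIL's Image.transform(..., Image.AFFINE, matrix) expects the inverse mapping from output to input pixels, which is why the matrix above is built from the inverted transform. For a pure shift by (tx, ty) the whole construction collapses to [1, 0, -tx, 0, 1, -ty]:

```python
from PIL import Image

img = Image.new('RGB', (8, 8))
tx, ty = 2, 1  # shift right 2 px, down 1 px
shifted = img.transform(img.size, Image.AFFINE, (1, 0, -tx, 0, 1, -ty))
```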
+    -------
+
+    '''
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, np.ndarray):
+        image = ToPIL(image)
+    if not isinstance(image, Image.Image):
+        raise TypeError(augment_error_message.format(type(image)))
+
+    if isinstance(shift, numbers.Number):
+        width_fraction = shift
+        height_fraction = shift
+    elif isinstance(shift, list) or isinstance(shift, tuple):
+        if len(shift) == 2:
+            width_fraction = shift[0]
+            height_fraction = shift[1]
+        else:
+            raise ValueError('shift must be int or list/tuple of length 2')
+    else:
+        raise TypeError('shift must be a number or a list/tuple of length 2')
+
+    DE_PY_INTER_MODE = {'nearest': Image.NEAREST, 'bilinear': Image.BILINEAR, 'bicubic': Image.BICUBIC}
+    if fill_mode not in ('nearest', 'bilinear', 'bicubic'):
+        raise TypeError("fill_mode must be one of 'nearest', 'bilinear', 'bicubic'")
+    fill_mode = DE_PY_INTER_MODE[fill_mode]
+    width, height = image.size
+    max_dx = width_fraction * width
+    max_dy = height_fraction * height
+    translations = (np.round(random.uniform(-max_dx, max_dx)), np.round(random.uniform(-max_dy, max_dy)))
+
+    scale = 1.0
+    shear = 0.0
+    output_size = image.size
+    center = (width * 0.5 + 0.5, height * 0.5 + 0.5)
+
+    angle = math.radians(0)
+    shear = math.radians(shear)
+    shear = [shear, 0]
+    scale = 1.0 / scale
+    d = math.cos(angle + shear[0]) * math.cos(angle + shear[1]) + \
+        math.sin(angle + shear[0]) * math.sin(angle + shear[1])
+    matrix = [
+        math.cos(angle + shear[0]),
+        math.sin(angle + shear[0]), 0, -math.sin(angle + shear[1]),
+        math.cos(angle + shear[1]), 0
+    ]
+    matrix = [scale / d * m for m in matrix]
+    matrix[2] += matrix[0] * (-center[0] - translations[0]) + matrix[1] * (-center[1] - translations[1])
+    matrix[5] += matrix[3] * (-center[0] - translations[0]) + matrix[4] * (-center[1] - translations[1])
+
+    # Apply center translation: C * RSS^-1 * C^-1 * T^-1
+    matrix[2] += center[0]
+    matrix[5] += center[1]
+
+    if __version__ >= '5':
+        kwargs = {"fillcolor": fill_value}
+    else:
+        kwargs = {}
+    return ToTensor(image.transform(output_size, Image.AFFINE, matrix, fill_mode, **kwargs))
+
+
+def RandomShear(image, degree, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1):
+    '''
+
+    Parameters
+    ----------
+    image:
+        Input tensor. Must be 3D.
+    degree:
+        int or list/tuple. If degree is a single number, the shear angle is drawn from (-degree, degree).
+        If it is a sequence of length 2 or 4, it gives the ranges for the x (and y) shear angles in degrees.
+    is_hwc:
+        The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True).
+    fill_mode:
+        Points outside the boundaries of the input are filled according to the given mode (one of {'nearest', 'bilinear', 'bicubic'}).
+    fill_value:
+        Value used for points outside the boundaries of the input.
+    interpolation_order:
+        int, order of spline interpolation. See ndimage.interpolation.affine_transform.
+    Returns
+        Sheared Numpy image tensor. 
+    -------
+
+    '''
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, np.ndarray):
+        image = ToPIL(image)
+    if not isinstance(image, Image.Image):
+        raise TypeError(augment_error_message.format(type(image)))
+    DE_PY_INTER_MODE = {'nearest': Image.NEAREST, 'bilinear': Image.BILINEAR, 'bicubic': Image.BICUBIC}
+    if fill_mode not in ('nearest', 'bilinear', 'bicubic'):
+        raise TypeError("fill_mode must be one of 'nearest', 'bilinear', 'bicubic'")
+    fill_mode = DE_PY_INTER_MODE[fill_mode]
+    width, height = image.size
+    translations = (0, 0)
+    scale = 1.0
+    shear = degree
+    output_size = image.size
+    center = (width * 0.5 + 0.5, height * 0.5 + 0.5)
+    angle = math.radians(0)
+
+    if shear is not None:
+        if isinstance(shear, numbers.Number):
+            shear = (-1 * shear, shear)
+            shear = [random.uniform(shear[0], shear[1]), random.uniform(shear[0], shear[1])]
+        elif len(shear) == 2 or len(shear) == 4:
+            if len(shear) == 2:
+                shear = [shear[0], shear[1], shear[0], shear[1]]
+            elif len(shear) == 4:
+                shear = [s for s in shear]
+            shear = [random.uniform(shear[0], shear[1]), random.uniform(shear[2], shear[3])]
+        else:
+            raise ValueError(
+                "shear should be a single value or a tuple/list of two or four values. Got {}".format(shear)
+            )
+        shear = [math.radians(s) for s in shear]
+    else:
+        shear = [0, 0]
+
+    d = math.cos(angle + shear[0]) * math.cos(angle + shear[1]) + \
+        math.sin(angle + shear[0]) * math.sin(angle + shear[1])
+    matrix = [
+        math.cos(angle + shear[0]),
+        math.sin(angle + shear[0]), 0, -math.sin(angle + shear[1]),
+        math.cos(angle + shear[1]), 0
+    ]
+    matrix = [scale / d * m for m in matrix]
+    matrix[2] += matrix[0] * (-center[0] - translations[0]) + matrix[1] * (-center[1] - translations[1])
+    matrix[5] += matrix[3] * (-center[0] - translations[0]) + matrix[4] * (-center[1] - translations[1])
+
+    # Apply center translation: C * RSS^-1 * C^-1 * T^-1
+    matrix[2] += center[0]
+    matrix[5] += center[1]
+
+    if __version__ >= '5':
+        kwargs = {"fillcolor": fill_value}
+    else:
+        kwargs = {}
+    return ToTensor(image.transform(output_size, Image.AFFINE, matrix, fill_mode, **kwargs))
+
+
+def RandomZoom(image, zoom_range, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1):
+    '''
+
+    Parameters
+    ----------
+    image:
+        Input tensor. Must be 3D.
+    zoom_range:
+        Tuple of floats; zoom range for width and height.
+    is_hwc:
+        The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True).
+    fill_mode:
+        Points outside the boundaries of the input are filled according to the given mode (one of {'nearest', 'bilinear', 'bicubic'}).
+    fill_value:
+        Value used for points outside the boundaries of the input.
+    interpolation_order:
+        int, order of spline interpolation. See ndimage.interpolation.affine_transform.
+
+    Returns
+        Zoomed Numpy image tensor. 
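RandomZoom builds the same inverse affine; with no shift or shear a sampled factor s reduces it to a scaling about the image center, and because the matrix maps output to input, s < 1 zooms in while s > 1 zooms out:

```python
from PIL import Image

img = Image.new('RGB', (100, 100))
s = 0.5                                    # sampled zoom factor
cx = cy = 100 * 0.5 + 0.5                  # the center used above
matrix = (s, 0, cx * (1 - s), 0, s, cy * (1 - s))
zoomed = img.transform(img.size, Image.AFFINE, matrix)  # s < 1 => zoom in
```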
+    -------
+
+    '''
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, np.ndarray):
+        image = ToPIL(image)
+    if not isinstance(image, Image.Image):
+        raise TypeError(augment_error_message.format(type(image)))
+    DE_PY_INTER_MODE = {'nearest': Image.NEAREST, 'bilinear': Image.BILINEAR, 'bicubic': Image.BICUBIC}
+    if isinstance(zoom_range, list) or isinstance(zoom_range, tuple):
+        if len(zoom_range) == 2:
+            scale = random.uniform(zoom_range[0], zoom_range[1])
+        else:
+            raise ValueError('The length of zoom_range must be 2')
+    else:
+        raise ValueError("zoom_range must be a tuple/list of two floats. Got {}".format(zoom_range))
+    if fill_mode not in ('nearest', 'bilinear', 'bicubic'):
+        raise TypeError("fill_mode must be one of ('nearest', 'bilinear', 'bicubic')")
+    fill_mode = DE_PY_INTER_MODE[fill_mode]
+    width, height = image.size
+    translations = (0, 0)
+    shear = (0, 0)
+    output_size = image.size
+    center = (width * 0.5 + 0.5, height * 0.5 + 0.5)
+    angle = math.radians(0)
+
+    d = math.cos(angle + shear[0]) * math.cos(angle + shear[1]) + \
+        math.sin(angle + shear[0]) * math.sin(angle + shear[1])
+    matrix = [
+        math.cos(angle + shear[0]),
+        math.sin(angle + shear[0]), 0, -math.sin(angle + shear[1]),
+        math.cos(angle + shear[1]), 0
+    ]
+    matrix = [scale / d * m for m in matrix]
+    matrix[2] += matrix[0] * (-center[0] - translations[0]) + matrix[1] * (-center[1] - translations[1])
+    matrix[5] += matrix[3] * (-center[0] - translations[0]) + matrix[4] * (-center[1] - translations[1])
+
+    # Apply center translation: C * RSS^-1 * C^-1 * T^-1
+    matrix[2] += center[0]
+    matrix[5] += center[1]
+
+    if __version__ >= '5':
+        kwargs = {"fillcolor": fill_value}
+    else:
+        kwargs = {}
+    return ToTensor(image.transform(output_size, Image.AFFINE, matrix, fill_mode, **kwargs))
+
+
+def Rescale(image, scale, offset=0):
+    '''
+
+    Parameters
+    ----------
+    image:
+        3-D image or 4-D images
+    scale:
+        Float, the scale to apply to the inputs.
+    offset:
+        Float, the offset to apply to the inputs.
+    Returns:
+        rescaled images
+    -------
+    '''
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
+        image = ToTensor(image)
+    if not isinstance(image, np.ndarray):
+        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
+
+    return ToTensor(image * scale + offset)
+
+
+def RandomFlipVertical(image, prob=0.5):
+
+    if prob > random.random():
+        image = FlipVertical(image)
+    return image
+
+
+def RandomFlipHorizontal(image, prob=0.5):
+
+    if prob > random.random():
+        image = FlipHorizontal(image)
+    return image
+
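+
+# Usage sketch (illustrative): each random flip is applied independently with probability `prob`.
+# flipped = RandomFlipHorizontal(img, prob=0.5)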
+
+
+def HWC2CHW(image):
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
+        image = ToTensor(image)
+    if not isinstance(image, np.ndarray):
+        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
+
+    image_shape = image.shape
+    if (len(image_shape) == 3):
+        return Transpose(image, (2, 0, 1))
+    elif (len(image_shape) == 4):
+        return Transpose(image, (0, 3, 1, 2))
+    else:
+        raise ValueError('\'image\' must have either 3 or 4 dimensions.')
+
+
+def CHW2HWC(image):
+    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
+        image = Decode(image)
+    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
+        image = ToTensor(image)
+    if not isinstance(image, np.ndarray):
+        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
+
+    image_shape = image.shape
+    if (len(image_shape) == 3):
+        return Transpose(image, (1, 2, 0))
+    elif (len(image_shape) == 4):
+        return Transpose(image, (0, 2, 3, 1))
+    else:
+        raise ValueError('\'image\' must have either 3 or 4 dimensions.')
diff --git a/tensorlayer/dataflow/image/paddle_image.py b/tensorlayer/dataflow/image/paddle_image.py
new file mode 100644
index 000000000..b33ef1503
--- /dev/null
+++ b/tensorlayer/dataflow/image/paddle_image.py
@@ -0,0 +1,19 @@
+import paddle
+import numpy as np
+from PIL import Image
+from paddle.vision.transforms import functional as F
+
+__all__ = [
+    'Standardization',
+]
+
+
+def Standardization(img, mean, std, data_format='HWC'):
+
+    if data_format == 'CHW':
+        mean = paddle.to_tensor(mean).reshape([-1, 1, 1])
+        std = paddle.to_tensor(std).reshape([-1, 1, 1])
+    else:
+        mean = paddle.to_tensor(mean)
+        std = paddle.to_tensor(std)
+    return (img - mean) / std
diff --git a/tensorlayer/dataflow/image/tensorflow_image.py b/tensorlayer/dataflow/image/tensorflow_image.py
new file mode 100644
index 000000000..ca0ce41a3
--- /dev/null
+++ b/tensorlayer/dataflow/image/tensorflow_image.py
@@ -0,0 +1,760 @@
+import tensorflow as tf
+import numpy as np
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.framework import ops
+from tensorflow.python.ops.image_ops_impl import _AssertAtLeast3DImage
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops.image_ops_impl import convert_image_dtype
+import numbers
+
+__all__ = [
+    'CentralCrop',
+    'HsvToRgb',
+    'AdjustBrightness',
+    'AdjustContrast',
+    'AdjustHue',
+    'AdjustSaturation',
+    'Crop',
+    'FlipHorizontal',
+    'FlipVertical',
+    'GrayToRgb',
+    'Standardization',
+    'RgbToGray',
+    'PadToBoundingbox',
+    'Pad',
+    'RandomBrightness',
+    'RandomContrast',
+    'RandomHue',
+    'RandomSaturation',
+    'RandomCrop',
+    'Resize',
+    'CropAndResize',
+    'CropOrPad',
+    'ResizeAndPad',
+    'RgbToHsv',
+    'Transpose',
+    'RandomRotation',
+    'RandomShift',
+    'RandomShear',
+    'RandomZoom',
+    'Rescale',
+    'RandomFlipVertical',
+    'RandomFlipHorizontal',
+    'HWC2CHW',
+    'CHW2HWC',
+]
+
+
+def CentralCrop(image, central_fraction=None, size=None):
+    '''
+
+    Parameters
+    ----------
+    image :
+        Either a 3-D float Tensor of shape [height, width, depth],
+        or a 4-D Tensor of shape [batch_size, height, width, depth].
+    central_fraction :
+        float (0, 1], fraction of size to crop
+    size:
+        size (Union[int, sequence]) – The output size of the cropped image. If size is an integer, a square crop of size (size, size) is returned.
+        If size is a sequence of length 2, it should be (height, width).
+    Returns :
+        3-D / 4-D float Tensor, as per the input.
+    -------
+    If the backend is TensorFlow, central_fraction is used preferentially; if size is used, the crop keeps the original height-width ratio.
+    If the backend is MindSpore, size is used preferentially.
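+
+    Examples
+    --------
+    An illustrative sketch (values assumed):
+
+    >>> x = tf.random.uniform((224, 224, 3))
+    >>> y = CentralCrop(x, central_fraction=0.5)  # center 50% crop -> (112, 112, 3)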
+    '''
+    if size is None and central_fraction is None:
+        raise ValueError('central_fraction and size can not be both None')
+
+    if central_fraction is None:
+        outshape = np.shape(image)
+        if len(outshape) == 3:
+            h_axis = 0
+            w_axis = 1
+        elif len(outshape) == 4:
+            h_axis = 1
+            w_axis = 2
+
+        if isinstance(size, numbers.Number):
+            target_height = size
+            target_width = size
+        elif isinstance(size, tuple) or isinstance(size, list):
+            if len(size) == 2:
+                target_height = size[0]
+                target_width = size[1]
+            else:
+                raise ValueError('The length of size must be 2')
+        else:
+            raise ValueError("Size should be a single integer or a list/tuple (h, w) of length 2.")
+        if target_height > outshape[h_axis] or target_width > outshape[w_axis]:
+            raise ValueError("CentralCrop size must be smaller than the original image size.")
+        central_fraction = max(target_height / outshape[h_axis], target_width / outshape[w_axis])
+    else:
+        if central_fraction > 1 or central_fraction <= 0:
+            raise ValueError('central_fraction must be in (0,1].')
+
+    return tf.image.central_crop(image, central_fraction)
+
+
+def HsvToRgb(image):
+
+    return tf.image.hsv_to_rgb(image)
+
+
+def AdjustBrightness(image, factor):
+
+    return tf.image.adjust_brightness(image, delta=factor)
+
+
+def AdjustContrast(image, factor):
+
+    return tf.image.adjust_contrast(image, contrast_factor=factor)
+
+
+def AdjustHue(image, factor):
+
+    return tf.image.adjust_hue(image, delta=factor)
+
+
+def AdjustSaturation(image, factor):
+
+    return tf.image.adjust_saturation(image, saturation_factor=factor)
+
+
+def Crop(image, offset_height, offset_width, target_height, target_width, is_hwc=True):
+    '''
+
+    Parameters
+    ----------
+    image:
+        An image or a batch of images
+    offset_height:
+        Vertical coordinate of the top-left corner of the result in the input.
+    offset_width:
+        Horizontal coordinate of the top-left corner of the result in the input.
+    target_height:
+        Height of the result.
+    target_width:
+        Width of the result.
+
+    Returns:
+        Output [batch, target_height, target_width, channels] or [target_height, target_width, channels]
+    -------
+    '''
+
+    return tf.image.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width)
+
+
+def FlipHorizontal(image):
+
+    return tf.image.flip_left_right(image)
+
+
+def FlipVertical(image):
+
+    return tf.image.flip_up_down(image)
+
+
+def GrayToRgb(image):
+
+    return tf.image.grayscale_to_rgb(image)
+
+
+def RgbToGray(image):
+
+    return tf.image.rgb_to_grayscale(image)
+
+
+def PadToBoundingbox(image, offset_height, offset_width, target_height, target_width, padding_value=0, is_hwc=True):
+
+    return tf.image.pad_to_bounding_box(
+        image,
+        offset_height,
+        offset_width,
+        target_height,
+        target_width,
+    )
+
+
+def Pad(image, padding, padding_value=0, mode='constant'):
+    '''
+
+    Parameters
+    ----------
+    image:
+        A 3-D or 4-D Tensor.
+    padding:
+        An integer or a list/tuple. If a single number is provided, pad all borders with this value.
+        If a tuple or list of 2 values is provided, pad the top and bottom with the first value and the left and right with the second value.
+        If 4 values are provided as a list or tuple, pad the (top, bottom, left, right) respectively.
+    padding_value:
+        In "CONSTANT" mode, the scalar pad value to use. Must be same type as tensor.
+    mode:
+        One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
+    Returns:
+        A padded Tensor. Has the same type as tensor.
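+
+    Examples
+    --------
+    An illustrative sketch (values assumed):
+
+    >>> x = tf.ones((4, 4, 3))
+    >>> y = Pad(x, padding=(1, 2))  # 1 px top/bottom, 2 px left/right -> (6, 8, 3)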
+ ------- + + ''' + image_shape = image.shape + if len(image_shape) == 3: + batch_size = 0 + elif len(image_shape) == 4: + batch_size = image_shape[0] + else: + raise TypeError('Image must be a 3-D tensor or 4-D tensor.') + + if isinstance(padding, int): + padding = ((padding, padding), (padding, padding)) + elif isinstance(padding, list) or isinstance(padding, tuple): + if len(padding) == 2: + padding = ((padding[0], padding[0]), (padding[1], padding[1])) + elif len(padding) == 4: + padding = ((padding[0], padding[1]), (padding[2], padding[3])) + else: + raise ValueError('The length of padding should be 2 or 4, but got {}.'.format(len(padding))) + else: + raise TypeError('Padding should be an integer or a list/tuple, but got {}.'.format(type(padding))) + if batch_size == 0: + padding = (padding[0], padding[1], (0, 0)) + else: + padding = ((0, 0), padding[0], padding[1], (0, 0)) + + return tf.pad(image, padding, mode=mode, constant_values=padding_value) + + +def Standardization(image, mean=None, std=None, channel_mode=False): + ''' + + Parameters + ---------- + image: + An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image. + mean: + List or tuple of mean values for each channel, with respect to channel order. + std: + List or tuple of standard deviations for each channel. + channel_mode: + Decide to implement standardization on whole image or each channel of image. + Returns: + A Tensor with the same shape and dtype as image. + ------- + ''' + image = tf.cast(image, tf.float32) + with ops.name_scope(None, 'Standardization', [image]) as scope: + image = ops.convert_to_tensor(image, name='image') + image = _AssertAtLeast3DImage(image) + + orig_dtype = image.dtype + if orig_dtype not in [dtypes.float16, dtypes.float32]: + image = convert_image_dtype(image, dtypes.float32) + + if mean is not None and std is not None: + mean = np.array(mean, dtype=np.float32) + std = np.array(std, dtype=np.float32) + image -= mean + image = math_ops.divide(image, std, name=scope) + return convert_image_dtype(image, orig_dtype, saturate=True) + + elif mean is None and std is None: + if channel_mode: + num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:-1]) + #`num_pixels` is the number of elements in each channels of 'image' + image_mean = math_ops.reduce_mean(image, axis=[-2, -3], keepdims=True) + # `image_mean` is the mean of elements in each channels of 'image' + + stddev = math_ops.reduce_std(image, axis=[-2, -3], keepdims=True) + min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, image.dtype)) + adjusted_sttdev = math_ops.maximum(stddev, min_stddev) + + image -= image_mean + image = math_ops.divide(image, adjusted_sttdev, name=scope) + return convert_image_dtype(image, orig_dtype, saturate=True) + + else: + num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:]) + #`num_pixels` is the number of elements in `image` + image_mean = math_ops.reduce_mean(image, axis=[-1, -2, -3], keepdims=True) + + # Apply a minimum normalization that protects us against uniform images. 
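+                # An explanatory note: for a uniform image the stddev is 0, so dividing by
+                # max(stddev, 1/sqrt(num_pixels)) keeps the result finite instead of inf/NaN.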
+                stddev = math_ops.reduce_std(image, axis=[-1, -2, -3], keepdims=True)
+                min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, image.dtype))
+                adjusted_stddev = math_ops.maximum(stddev, min_stddev)
+
+                image -= image_mean
+                image = math_ops.divide(image, adjusted_stddev, name=scope)
+                return convert_image_dtype(image, orig_dtype, saturate=True)
+        else:
+            raise ValueError('std and mean must both be None or not None')
+
+
+def RandomBrightness(image, factor):
+    '''
+
+    Parameters
+    ----------
+    image:
+        An image or images to adjust
+    factor:
+        Float, must be non-negative. The brightness delta is picked uniformly from [-factor, factor).
+    Returns:
+        The brightness-adjusted image(s).
+    -------
+
+    '''
+
+    return tf.image.random_brightness(image, factor)
+
+
+def RandomContrast(image, lower, upper, seed=None):
+    '''
+
+    Parameters
+    ----------
+    image:
+        An image tensor with 3 or more dimensions.
+    lower:
+        float. Lower bound for the random contrast factor.
+    upper:
+        float. Upper bound for the random contrast factor.
+    seed:
+        A Python integer. Used to create a random seed.
+
+    Returns:
+        The contrast-adjusted image(s).
+    -------
+    '''
+
+    return tf.image.random_contrast(image, lower, upper, seed)
+
+
+def RandomHue(image, factor, seed=None):
+    '''
+
+    Parameters
+    ----------
+    image:
+        RGB image or images. The size of the last dimension must be 3.
+    factor:
+        float. The maximum value for the random factor.
+    seed:
+        An operation-specific seed.
+
+    Returns:
+        Adjusted image(s), same shape and DType as `image`.
+    -------
+
+    '''
+
+    return tf.image.random_hue(image, factor, seed)
+
+
+def RandomSaturation(image, lower, upper, seed=None):
+    '''
+    Parameters
+    ----------
+    image:
+        RGB image or images. The size of the last dimension must be 3.
+    lower:
+        float. Lower bound for the random saturation factor.
+    upper:
+        float. Upper bound for the random saturation factor.
+    seed:
+        An operation-specific seed.
+
+    Returns:
+        Adjusted image(s), same shape and DType as `image`.
+    -------
+    '''
+
+    return tf.image.random_saturation(image, lower, upper, seed)
+
+
+def RandomCrop(image, size):
+    '''
+
+    Parameters
+    ----------
+    image:
+        Input an image to crop.
+    size:
+        an integer or a list/tuple. If size is an integer, the cropped image will have shape [size, size, 3];
+        if size has length 2, the cropped image will have shape [height, width, 3].
+    Returns:
+        A cropped image of the same rank as image and shape size.
+    -------
+    '''
+
+    if isinstance(size, int):
+        crop_size = (size, size)
+    elif isinstance(size, (list, tuple)) and len(size) == 2:
+        crop_size = (size[0], size[1])
+    else:
+        raise ValueError("Size should be a single integer or a list/tuple (h, w) of length 2.")
+
+    if len(image.shape) == 3:
+        h, w, c = image.shape
+        crop_size = crop_size + (c, )
+    elif len(image.shape) == 4:
+        b, h, w, c = image.shape
+        crop_size = (b, ) + crop_size + (c, )
+
+    return tf.image.random_crop(image, size=crop_size)
+
+
+def Resize(image, size, method='bilinear', preserve_aspect_ratio=False, antialias=False):
+    '''
+
+    Parameters
+    ----------
+    image:
+        Input an image to resize
+    size:
+        an integer or a list/tuple. If size is an integer, the resized image will have shape [size, size, 3];
+        if size has length 2, the resized image will have shape [height, width, 3].
+    method:
+        An image.ResizeMethod, or string equivalent, should be in
+        (bilinear, lanczos3, lanczos5, bicubic, gaussian, nearest, area, mitchellcubic).
+        Defaults to bilinear.
+    preserve_aspect_ratio:
+        Whether to preserve the aspect ratio.
+    antialias:
+        Whether to use an anti-aliasing filter when downsampling an image.
+    Returns:
+        a resized image
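+
+    Examples
+    --------
+    An illustrative sketch (values assumed):
+
+    >>> x = tf.random.uniform((224, 224, 3))
+    >>> y = Resize(x, size=(100, 120))  # bilinear by default -> (100, 120, 3)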
+    -------
+
+    '''
+    if isinstance(size, int):
+        size = [size, size]
+    elif len(size) != 2:
+        raise ValueError('Size should be a single integer or a list/tuple (h, w) of length 2.')
+
+    return tf.image.resize(image, size, method, preserve_aspect_ratio, antialias)
+
+
+def CropAndResize(image, boxes, box_indices, crop_size, method='bilinear', extrapolation_value=0, is_hwc=True):
+    '''
+
+    Parameters
+    ----------
+    image:
+        A 4-D tensor of shape [batch, image_height, image_width, depth]. Both image_height and image_width need to be positive.
+    boxes:
+        A 2-D tensor of shape [num_boxes, 4].
+    box_indices:
+        A 1-D tensor of shape [num_boxes] with int32 values in [0, batch).
+        The value of box_ind[i] specifies the image that the i-th box refers to.
+    crop_size:
+        A 1-D tensor of 2 elements, size = [crop_height, crop_width]. All cropped image patches are resized to this size.
+        The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be positive.
+    method:
+        An optional string specifying the sampling method for resizing.
+        It can be either "bilinear" or "nearest" and default to "bilinear".
+    extrapolation_value:
+        An optional float. Defaults to 0. Value used for extrapolation, when applicable.
+    Returns:
+        A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth].
+    -------
+
+    '''
+    image_shape = image.shape
+    boxes_num = 0
+    if isinstance(boxes, tf.Tensor):
+        boxes_num = boxes.shape[0]
+    elif isinstance(boxes, np.ndarray) or isinstance(boxes, list) or isinstance(boxes, tuple):
+        boxes = tf.constant(boxes)
+        boxes_num = boxes.shape[0]
+
+    if isinstance(crop_size, int):
+        crop_size = (crop_size, crop_size)
+        crop_size = tf.constant(crop_size)
+    elif isinstance(crop_size, np.ndarray) or isinstance(crop_size, list) or isinstance(crop_size, tuple):
+        crop_size = tf.constant(crop_size)
+
+    if isinstance(box_indices, np.ndarray) or isinstance(box_indices, list) or isinstance(box_indices, tuple):
+        box_indices = tf.constant(box_indices)
+    # if input is an image.
+    # a 3-D Tensor of shape [image_height, image_width, depth] should use 'tf.expand_dims(image, axis = 0)'
+    # to convert input to a 4-D Tensor of shape [batch_size,image_height, image_width, depth]
+    if len(image_shape) == 3:
+        image = tf.expand_dims(image, axis=0)
+        box_indices = np.zeros((boxes_num), dtype=np.int32)
+        box_indices = tf.constant(box_indices)
+
+    return tf.image.crop_and_resize(
+        image, boxes=boxes, box_indices=box_indices, crop_size=crop_size, method=method,
+        extrapolation_value=extrapolation_value
+    )
+
+
+def CropOrPad(image, target_height, target_width, is_hwc=True):
+    '''
+    Resizes an image to a target width and height by either centrally cropping the image or padding it evenly with zeros.
+    Parameters
+    ----------
+    image:
+        4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels].
+    target_height:
+        Target height.
+    target_width:
+        Target width.
+    Returns:
+        Cropped and/or padded image.
+    -------
+    '''
+
+    return tf.image.resize_with_crop_or_pad(image, target_height, target_width)
+
+
+def ResizeAndPad(image, target_height, target_width, method='bilinear', antialias=False, is_hwc=True):
+    '''
+
+    Parameters
+    ----------
+    image:
+        4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels].
+    target_height:
+        Target height.
+    target_width:
+        Target width.
+ is_hwc: + The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). + Returns: + Resized and padded image. If images was 4-D, a 4-D float Tensor of shape [batch, new_height, new_width, channels]. + If images was 3-D, a 3-D float Tensor of shape [new_height, new_width, channels]. + ------- + + ''' + + return tf.image.resize_with_pad(image, target_height, target_width, method=method, antialias=antialias) + + +def RgbToHsv(image): + + return tf.image.rgb_to_hsv(image) + + +def Transpose(image, order): + image = ops.convert_to_tensor(image) + image = _AssertAtLeast3DImage(image) + shape = image.get_shape() + if shape.ndims == 3 or shape.ndims is None: + if len(order) != 3: + raise ValueError('if image is 3-D tensor, order should be a list/tuple with length of 3') + return array_ops.transpose(image, order) + elif shape.ndims == 4: + if len(order) != 4: + raise ValueError('if image is 4-D tensor, order should be a list/tuple with length of 4') + return array_ops.transpose(image, order) + else: + raise ValueError('\'image\' must have either 3 or 4 dimensions.') + + +def RandomRotation( + image, degrees, fill_mode='nearest', fill_value=0, center=None, expand=False, is_hwc=True, interpolation_order=1 +): + if isinstance(image, tf.Tensor): + image = np.asarray(image) + if not isinstance(image, np.ndarray): + raise TypeError('img should be NumPy image. Got {}'.format(type(image))) + if is_hwc: + h, w, c = 0, 1, 2 + else: + h, w, c = 1, 2, 0 + if fill_mode not in ('constant', 'nearest', 'reflect', 'wrap'): + raise TypeError('fill_mode must be in (constant, nearest, reflect, wrap)') + + image = tf.keras.preprocessing.image.random_rotation( + image, degrees, h, w, c, fill_mode, fill_value, interpolation_order + ) + return tf.convert_to_tensor(image) + + +def RandomShift(image, shift, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1): + ''' + + Parameters + ---------- + image + Input tensor. Must be 3D. + shift: + int or list/tuple, if shift is int, Width shift range will equal to height shift range. + if shift is list/tuple, shift range will be [width fraction, height fraction] + is_hwc: + The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). + fill_mode: + Points outside the boundaries of the input are filled according to the given mode (one of {'constant', 'nearest', 'reflect', 'wrap'}). + fill_value: + Value used for points outside the boundaries of the input if mode='constant'. + interpolation_order + int, order of spline interpolation. see ndimage.interpolation.affine_transform + Returns + Shifted Numpy image tensor. + ------- + + ''' + if isinstance(image, tf.Tensor): + image = np.asarray(image) + if not isinstance(image, np.ndarray): + raise TypeError('img should be NumPy image. 
Got {}'.format(type(image)))
+    if isinstance(shift, numbers.Number):
+        width_fraction = shift
+        height_fraction = shift
+    elif isinstance(shift, list) or isinstance(shift, tuple):
+        if len(shift) == 2:
+            width_fraction = shift[0]
+            height_fraction = shift[1]
+        else:
+            raise ValueError('shift must be a number or a list/tuple of length 2')
+    else:
+        raise TypeError('shift must be a number or a list/tuple of length 2')
+
+    if is_hwc:
+        h, w, c = 0, 1, 2
+    else:
+        h, w, c = 1, 2, 0
+    if fill_mode not in ('constant', 'nearest', 'reflect', 'wrap'):
+        raise TypeError('fill_mode must be in (constant, nearest, reflect, wrap)')
+
+    image = tf.keras.preprocessing.image.random_shift(
+        image, wrg=width_fraction, hrg=height_fraction, row_axis=h, col_axis=w, channel_axis=c, fill_mode=fill_mode,
+        cval=fill_value, interpolation_order=interpolation_order
+    )
+
+    return tf.convert_to_tensor(image)
+
+
+def RandomShear(image, degree, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1):
+    '''
+
+    Parameters
+    ----------
+    image
+        Input tensor. Must be 3D.
+    degree:
+        Transformation intensity in degrees.
+    is_hwc:
+        The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True).
+    fill_mode:
+        Points outside the boundaries of the input are filled according to the given mode (one of {'constant', 'nearest', 'reflect', 'wrap'}).
+    fill_value:
+        Value used for points outside the boundaries of the input if mode='constant'.
+    interpolation_order
+        int, order of spline interpolation. see ndimage.interpolation.affine_transform
+    Returns
+        Sheared Numpy image tensor.
+    -------
+
+    '''
+    if isinstance(image, tf.Tensor):
+        image = np.asarray(image)
+    if not isinstance(image, np.ndarray):
+        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
+    if is_hwc:
+        h, w, c = 0, 1, 2
+    else:
+        h, w, c = 1, 2, 0
+
+    image = tf.keras.preprocessing.image.random_shear(
+        image, intensity=degree, row_axis=h, col_axis=w, channel_axis=c, fill_mode=fill_mode, cval=fill_value,
+        interpolation_order=interpolation_order
+    )
+    return tf.convert_to_tensor(image)
+
+
+def RandomZoom(image, zoom_range, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1):
+    '''
+
+    Parameters
+    ----------
+    image:
+        Input tensor. Must be 3D.
+    zoom_range:
+        Tuple of floats; zoom range for width and height.
+    is_hwc:
+        The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True).
+    fill_mode:
+        Points outside the boundaries of the input are filled according to the given mode (one of {'constant', 'nearest', 'reflect', 'wrap'}).
+    fill_value:
+        Value used for points outside the boundaries of the input if mode='constant'.
+    interpolation_order:
+        int, order of spline interpolation. see ndimage.interpolation.affine_transform
+
+    Returns
+        Zoomed Numpy image tensor.
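+
+    Examples
+    --------
+    An illustrative sketch (values assumed):
+
+    >>> x = np.random.uniform(0, 1, (100, 100, 3))
+    >>> y = RandomZoom(x, zoom_range=(0.8, 1.2))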
Got {}'.format(type(image))) + if isinstance(zoom_range, numbers.Number): + zoom_range = (zoom_range, zoom_range) + elif isinstance(zoom_range, list) or isinstance(zoom_range, tuple): + if len(zoom_range) == 2: + zoom_range = (zoom_range[0], zoom_range[1]) + else: + raise ValueError('shift must be number or list/tuple of length 2') + if is_hwc: + h, w, c = 0, 1, 2 + else: + h, w, c = 1, 2, 0 + + image = tf.keras.preprocessing.image.random_zoom( + image, zoom_range=zoom_range, row_axis=h, col_axis=w, channel_axis=c, fill_mode=fill_mode, cval=fill_value, + interpolation_order=interpolation_order + ) + return tf.convert_to_tensor(image) + + +def Rescale(image, scale, offset=0): + ''' + + Parameters + ---------- + image: + 3-D image or 4-D images + scale: + Float, the scale to apply to the inputs. + offset: + Float, the offset to apply to the inputs. + Returns: + rescaled images + ------- + ''' + image = tf.cast(image, dtype=tf.float32) + scale = tf.cast(scale, dtype=tf.float32) + offset = tf.cast(offset, dtype=tf.float32) + return image * scale + offset + + +def RandomFlipVertical(image): + + return tf.image.random_flip_up_down(image) + + +def RandomFlipHorizontal(image): + + return tf.image.random_flip_left_right(image) + + +def HWC2CHW(image): + + if (len(image.shape) == 3): + return Transpose(image, (2, 0, 1)) + elif (len(image.shape) == 4): + return Transpose(image, (0, 3, 1, 2)) + else: + raise ValueError('\'image\' must have either 3 or 4 dimensions.') + + +def CHW2HWC(image): + + if (len(image.shape) == 3): + return Transpose(image, (1, 2, 0)) + elif (len(image.shape) == 4): + return Transpose(image, (0, 2, 3, 1)) + else: + raise ValueError('\'image\' must have either 3 or 4 dimensions.') diff --git a/tensorlayer/dataflow/mindspore_data.py b/tensorlayer/dataflow/mindspore_data.py index fab126161..54e275f9e 100644 --- a/tensorlayer/dataflow/mindspore_data.py +++ b/tensorlayer/dataflow/mindspore_data.py @@ -21,6 +21,7 @@ 'Take', 'TextFlieDataset', 'TFRecordDataset', + 'Dataloader', ] @@ -158,10 +159,8 @@ def Prefetch(dataset, buffer_size): return dataset.config.set_prefetch_size(prefetch_size) - def Repeat(dataset, count=None): - return dataset.repeat(count) @@ -275,3 +274,14 @@ def Zip(datasets): ''' return ds.zip(datasets) + + +def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, prefetch=0, shuffle_buffer_size=0): + + if shuffle: + dataset = Shuffle(dataset, buffer_size=shuffle_buffer_size) + + dataset = Batch(dataset, batch_size=batch_size, drop_remainder=drop_last) + dataset = Prefetch(dataset, buffer_size=prefetch) + + return dataset diff --git a/tensorlayer/dataflow/mindspore_image.py b/tensorlayer/dataflow/mindspore_image.py deleted file mode 100644 index e4c1fd9ab..000000000 --- a/tensorlayer/dataflow/mindspore_image.py +++ /dev/null @@ -1,305 +0,0 @@ -import mindspore.dataset as ms -import mindspore.dataset.vision.c_transforms as c_vision -import mindspore.dataset.vision.py_transforms as py_vision -import mindspore.dataset.vision.py_transforms_util as py_util -import numpy as np -from PIL import Image, ImageOps, ImageEnhance, __version__ - -__all__ = [ - 'CentralCrop', 'HsvToRgb', 'AdjustBrightness', 'AdjustContrast', 'AdjustHue', 'Crop', 'FlipHorizontal', - 'FlipVertical', 'GrayToRgb', 'RgbToGray', 'PadToBoundingBox' -] - -augment_error_message = 'img should be PIL image. Got {}. Use Decode() for encoded data or ToPIL() for decoded data.' 
- - -def CentralCrop(image, central_fraction=None, size=None): - ''' - - Parameters - ---------- - image : - input Either a 3-D float Tensor of shape [height, width, depth], - or a 4-D Tensor of shape [batch_size, height, width, depth]. - central_fraction : - float (0, 1], fraction of size to crop - size: - size (Union[int, sequence]) – The output size of the cropped image. If size is an integer, a square crop of size (size, size) is returned. - If size is a sequence of length 2, it should be (height, width). - Returns : - 3-D / 4-D float Tensor, as per the input. - ------- - ''' - if size is None and central_fraction is None: - raise ValueError('central_fraction and size can not be both None') - - if size is None: - outshape = np.shape(image) - if len(outshape) == 3: - h_axis = 0 - w_axis = 1 - elif len(outshape) == 4: - h_axis = 1 - w_axis = 2 - - height = outshape[h_axis] - width = outshape[w_axis] - - target_height = height * central_fraction - target_width = width * central_fraction - - size = (target_height, target_width) - - return py_util.center_crop(image, size) - - -def HsvToRgb(image, is_hwc=True): - - image = np.asarray(image) - - return py_util.hsv_to_rgbs(image, is_hwc=is_hwc) - - -def AdjustBrightness(image, factor): - ''' - - Parameters - ---------- - image: - input NumPy image array or PIL image - factor: - factor should be in the range (-1,1) - Returns: - ------- - np darray image - ''' - - image = np.asarray(image) - image = image / 255 - image = image + factor - index = np.where(image > 1) - image[index] = 1 - index = np.where(image < 0) - image[index] = 0 - image = image * 255 - - return image - - -def AdjustContrast(image, factor): - - if isinstance(image, np.ndarray): - image = Image.fromarray(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - image = ImageEnhance.Contrast(image).enhance(factor) - - image = np.array(image) - - return image - - -def AdjustHue(image, factor): - - if isinstance(image, np.ndarray): - image = Image.fromarray(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - image_hue_factor = factor - if not -1 <= image_hue_factor <= 1: - raise ValueError('image_hue_factor {} is not in [-1, 1].'.format(image_hue_factor)) - - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - mode = image.mode - if mode in {'L', '1', 'I', 'F'}: - return image - - hue, saturation, value = image.convert('HSV').split() - - np_hue = np.array(hue, dtype=np.uint8) - - with np.errstate(over='ignore'): - np_hue += np.uint8(image_hue_factor * 255) - hue = Image.fromarray(np_hue, 'L') - - image = Image.merge('HSV', (hue, saturation, value)).convert(mode) - return image - - -def AdjustSaturation(image, factor): - - if isinstance(image, np.ndarray): - image = Image.fromarray(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - enhancer = ImageEnhance.Color(image) - image = enhancer.enhance(factor) - return image - - -def Crop(image, offset_height, offset_width, target_height, target_width): - - if isinstance(image, np.ndarray): - image = Image.fromarray(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - image = np.array( - image.crop((offset_width, offset_height, offset_width + target_width, offset_width + target_height)) - ) - return image - - -def FlipHorizontal(image): - - if 
isinstance(image, np.ndarray): - image = Image.fromarray(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - image = np.array(image.transpose(Image.FLIP_LEFT_RIGHT)) - - return image - - -def FlipVertical(image): - - if isinstance(image, np.ndarray): - image = Image.fromarray(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - image = np.array(image.transpose(Image.FLIP_TOP_BOTTOM)) - - return image - - -def GrayToRgb(image): - - image = np.asarray(image) - shape = image.shape - output_image = np.zeros((shape[0], shape[1], 3), dtype=np.uint8) - if len(shape) == 3: - for i in range(3): - output_image[:, :, i] = image[:, :, 1] - elif len(shape) == 2: - for i in range(3): - output_image[:, :, i] = image - - return output_image - - -def RgbToGray(image): - - if isinstance(image, np.ndarray): - image = Image.fromarray(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - ''' - 将彩色图像转换为灰度(模式“L”)时,库使用ITU-R 601-2 Luma转换: - L = R * 299/1000 + G * 587/1000 + B * 114/1000 - ''' - image = image.convert('L') - image = np.asarray(image) - - return image - - -def PadToBoundingBox(image, offset_height, offset_width, target_height, target_width): - ''' - - Parameters - ---------- - image: - A PIL image - offset_height: - Number of rows of zeros to add on top. - offset_width: - Number of columns of zeros to add on the left. - target_height: - Height of output image. - target_width - Width of output image. - Returns - A numpy ndarray image - ------- - ''' - - if offset_height < 0: - raise ValueError("offset_height must be >= 0") - if offset_width < 0: - raise ValueError("offset_width must be >= 0") - image = np.array(image) - shape = image.shape - top = offset_height - bottom = target_height - shape[0] - top - left = offset_width - right = target_width - shape[1] - left - - if bottom < 0: - raise ValueError("target_height must be >= offset_height + height") - - if right < 0: - raise ValueError("target_width must be >= offset_width + width") - - return np.pad(image, ((top, bottom), (left, right), (0, 0)), mode='constant') - - -def Standardization(image, mean=None, std=None, channel_mode=False): - ''' - - Parameters - ---------- - image: - An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image. - mean: - List or tuple of mean values for each channel, with respect to channel order. - std: - List or tuple of standard deviations for each channel. - channel_mode: - Decide to implement standardization on whole image or each channel of image. - Returns: - A Tensor with the same shape and dtype as image. 
-    -------
-    '''
-    image = np.array(image, dtype=np.float32)
-    num_shape = image.shape
-    if mean is not None and std is not None:
-        if len(mean) != len(std):
-            raise ValueError("Length of mean and std must be equal")
-        if len(mean) == 1:
-            mean = [mean[0]] * num_shape[2]
-            std = [std[0]] * num_shape[2]
-        mean = np.array(mean, dtype=image.dtype)
-        std = np.array(std, dtype=image.dtype)
-        return (image - mean[:, None, None]) / std[:, None, None]
-    elif mean is None and std is None:
-        if channel_mode:
-            num_pixels = num_shape[0] * num_shape[1]
-            image_mean = np.mean(image, axis=(0, 1))
-            stddev = np.std(image, axis=(0, 1))
-            min_sttdev = 1 / np.sqrt(num_pixels)
-            min_sttdev = [min_sttdev] * num_shape[2]
-            adjusted_sttdev = np.maximum(stddev, min_sttdev)
-
-            image -= image_mean
-            image = np.divide(image, adjusted_sttdev)
-            return image
-        else:
-            num_pixels = num_shape[0] * num_shape[1] * num_shape[2]
-            image_mean = np.mean(image, axis=(0, 1, 2))
-            image_mean = [image_mean] * 3
-            stddev = np.std(image, axis=(0, 1, 2))
-            min_sttdev = 1 / np.sqrt(num_pixels)
-            adjusted_sttdev = np.maximum(stddev, min_sttdev)
-            adjusted_sttdev = [adjusted_sttdev] * 3
-
-            image -= image_mean
-            image = np.divide(image, adjusted_sttdev)
-            return image
-    else:
-        raise ValueError('std and mean must both be None or not None')
diff --git a/tensorlayer/dataflow/paddle_data.py b/tensorlayer/dataflow/paddle_data.py
new file mode 100644
index 000000000..d001d56a6
--- /dev/null
+++ b/tensorlayer/dataflow/paddle_data.py
@@ -0,0 +1,131 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import numpy as np
+import paddle
+from paddle.io import Dataset, BatchSampler, DataLoader, IterableDataset
+__all__ = [
+    'Concat',
+    'FromGenerator',
+    'FromSlices',
+    'Map',
+    # 'Shuffle',
+    # 'Batch',
+    'Dataloader',
+]
+
+
+def to_list(value):
+    if value is None:
+        return value
+    if isinstance(value, (list, tuple)):
+        return list(value)
+    return [value]
+
+
+class FromGenerator(Dataset):
+
+    def __init__(self, generator):
+
+        if not callable(generator):
+            raise TypeError("'generator' must be callable")
+        self.generator = generator()
+        self.datas = []
+        self.labels = []
+        for data, label in self.generator:
+            self.datas.append(data)
+            self.labels.append(label)
+
+    def __getitem__(self, idx):
+
+        x = self.datas[idx]
+        y = self.labels[idx]
+
+        return x, y
+
+    def __len__(self):
+
+        return len(self.datas)
+
+
+class FromSlices(Dataset):
+
+    def __init__(self, datas, transform=None):
+        self.datas = datas[0]
+        self.labels = datas[1]
+        self.transform = transform
+
+        if len(self.datas) != len(self.labels):
+            raise ValueError('Datas and labels do not have the same length along the first dimension.')
+
+    def __getitem__(self, idx):
+        data = paddle.to_tensor(self.datas[idx], dtype='float32')
+        label = paddle.to_tensor(self.labels[idx], dtype='int64')
+        if self.transform is not None:
+            data = self.transform(data)
+        return data, label
+
+    def __len__(self):
+
+        return len(self.datas)
+
+
+class Concat(IterableDataset):
+
+    def __init__(self, datasets):
+        self.datasets = list(datasets)
+        assert len(self.datasets) > 0, "input datasets should not be empty"
+        for i, dataset in enumerate(self.datasets):
+            assert isinstance(dataset, IterableDataset), \
+                "Concat only supports paddle.io.IterableDataset"
+
+    def __iter__(self):
+        for dataset in self.datasets:
+            for sample in dataset:
+                yield sample
+
+
+class Map(Dataset):
+
+    def __init__(self, dataset, transform):
+        self.isDataset = False
+        self.transform = transform
+        if isinstance(dataset, Dataset):
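+            # wrap an existing paddle.io.Dataset; `transform` is applied lazily in __getitem__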
+            self.isDataset = True
+            self.dataset = dataset
+        elif isinstance(dataset, list) or isinstance(dataset, tuple):
+            self.datas = dataset[0]
+            self.labels = dataset[1]
+        else:
+            raise TypeError(
+                " 'dataset' should be subclass instance of paddle.io.Dataset "
+                "or a [data, label] list/tuple, not a {}".format(type(dataset))
+            )
+
+    def __getitem__(self, idx):
+        if self.isDataset:
+            x = self.dataset[idx][0]
+            if not isinstance(x, np.ndarray):
+                x = np.asarray(x)
+            x = self.transform(x)
+            y = self.dataset[idx][1]
+        else:
+            x = self.datas[idx]
+            if not isinstance(x, np.ndarray):
+                x = np.asarray(x)
+            x = self.transform(x)
+            y = self.labels[idx]
+
+        return x, y
+
+    def __len__(self):
+
+        if self.isDataset:
+            return len(self.dataset)
+        else:
+            return len(self.datas)
+
+
+def Dataloader(dataset, batch_size=None, shuffle=False, drop_last=False, prefetch=0, shuffle_buffer_size=0):
+
+    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last)
diff --git a/tensorlayer/dataflow/tensorflow_data.py b/tensorlayer/dataflow/tensorflow_data.py
index ce50c77d0..8dca50f80 100644
--- a/tensorlayer/dataflow/tensorflow_data.py
+++ b/tensorlayer/dataflow/tensorflow_data.py
@@ -21,6 +21,7 @@
     'TextFlieDataset',
     'TFRecordDataset',
     'Zip',
+    'Dataloader',
 ]
@@ -252,3 +253,14 @@ def Zip(datasets):
     '''
     return tf.data.Dataset.zip(datasets)
+
+
+def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, prefetch=0, shuffle_buffer_size=0):
+
+    if shuffle:
+        dataset = Shuffle(dataset, buffer_size=shuffle_buffer_size, reshuffle_each_iteration=True)
+
+    dataset = Batch(dataset, batch_size=batch_size, drop_remainder=drop_last)
+    dataset = Prefetch(dataset, buffer_size=prefetch)
+
+    return dataset
diff --git a/tensorlayer/dataflow/tensorflow_image.py b/tensorlayer/dataflow/tensorflow_image.py
deleted file mode 100644
index 39419b970..000000000
--- a/tensorlayer/dataflow/tensorflow_image.py
+++ /dev/null
@@ -1,200 +0,0 @@
-import tensorflow as tf
-import numpy as np
-from tensorflow.python.ops import math_ops
-from tensorflow.python.ops import array_ops
-from tensorflow.python.framework import ops
-from tensorflow.python.ops.image_ops_impl import _AssertAtLeast3DImage
-from tensorflow.python.framework import dtypes
-from tensorflow.python.ops.image_ops_impl import convert_image_dtype
-__all__ = [
-    'CentralCrop',
-    'HsvToRgb',
-    'AdjustBrightness',
-    'AdjustContrast',
-    'AdjustHue',
-    'AdjustSaturation',
-    'Crop',
-    'FlipHorizontal',
-    'FlipVertical',
-    'GrayToRgb',
-    'Standardization',
-]
-
-
-def CentralCrop(image, central_fraction=None, size=None):
-    '''
-
-    Parameters
-    ----------
-    image :
-        input Either a 3-D float Tensor of shape [height, width, depth],
-        or a 4-D Tensor of shape [batch_size, height, width, depth].
-    central_fraction :
-        float (0, 1], fraction of size to crop
-    size:
-        size (Union[int, sequence]) – The output size of the cropped image. If size is an integer, a square crop of size (size, size) is returned.
-        If size is a sequence of length 2, it should be (height, width).
-    Returns :
-        3-D / 4-D float Tensor, as per the input.
- ------- - ''' - if size is None and central_fraction is None: - raise ValueError('central_fraction and size can not be both None') - - if central_fraction is None: - outshape = np.shape(image) - if len(outshape) == 3: - h_axis = 0 - w_axis = 1 - elif len(outshape) == 4: - h_axis = 1 - w_axis = 2 - - if isinstance(size, int): - target_height = size - target_width = size - elif isinstance(size, tuple): - target_height = size[0] - target_width = size[1] - - central_fraction = max(target_height // outshape[h_axis], target_width // outshape[w_axis]) - - return tf.image.central_crop(image, central_fraction) - - -def HsvToRgb(image): - - return tf.image.hsv_to_rgb(image) - - -def AdjustBrightness(image, factor): - - return tf.image.adjust_brightness(image, delta=factor) - - -def AdjustContrast(image, factor): - - return tf.image.adjust_contrast(image, contrast_factor=factor) - - -def AdjustHue(image, factor): - - return tf.image.adjust_hue(image, delta=factor) - - -def AdjustSaturation(image, factor): - - return tf.image.adjust_saturation(image, saturation_factor=factor) - - -def Crop(image, offset_height, offset_width, target_height, target_width): - ''' - - Parameters - ---------- - image: - A image or a batch of images - offset_height: - Vertical coordinate of the top-left corner of the result in the input. - offset_width: - Horizontal coordinate of the top-left corner of the result in the input. - target_height: - Height of the result. - target_width: - Width of the result. - - Returns: - Output [batch, target_height, target_width, channels] or [target_height, target_width, channels] - ------- - ''' - - return tf.image.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width) - - -def FlipHorizontal(image): - - return tf.image.flip_left_right(image) - - -def FlipVertical(image): - - return tf.image.flip_up_down(image) - - -def GrayToRgb(image): - - return tf.image.grayscale_to_rgb(image) - - -def RgbToGray(image): - - return tf.image.rgb_to_grayscale(image) - - -def PadToBoundingBox(image, offset_height, offset_width, target_height, target_width): - - return tf.image.pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width) - - -def Standardization(image, mean=None, std=None, channel_mode=False): - ''' - - Parameters - ---------- - image: - An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image. - mean: - List or tuple of mean values for each channel, with respect to channel order. - std: - List or tuple of standard deviations for each channel. - channel_mode: - Decide to implement standardization on whole image or each channel of image. - Returns: - A Tensor with the same shape and dtype as image. 
- ------- - ''' - with ops.name_scope(None, 'Standardization', [image]) as scope: - image = ops.convert_to_tensor(image, name='image') - image = _AssertAtLeast3DImage(image) - - orig_dtype = image.dtype - if orig_dtype not in [dtypes.float16, dtypes.float32]: - image = convert_image_dtype(image, dtypes.float32) - - if mean is not None and std is not None: - mean = np.array(mean, dtype=np.float32) - std = np.array(std, dtype=np.float32) - image -= mean - image = math_ops.divide(image, std, name=scope) - return convert_image_dtype(image, orig_dtype, saturate=True) - - elif mean is None and std is None: - if channel_mode: - num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:-1]) - #`num_pixels` is the number of elements in each channels of 'image' - image_mean = math_ops.reduce_mean(image, axis=[-2, -3], keepdims=True) - # `image_mean` is the mean of elements in each channels of 'image' - - stddev = math_ops.reduce_std(image, axis=[-2, -3], keepdims=True) - min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, image.dtype)) - adjusted_sttdev = math_ops.maximum(stddev, min_stddev) - - image -= image_mean - image = math_ops.divide(image, adjusted_sttdev, name=scope) - return convert_image_dtype(image, orig_dtype, saturate=True) - - else: - num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:]) - #`num_pixels` is the number of elements in `image` - image_mean = math_ops.reduce_mean(image, axis=[-1, -2, -3], keepdims=True) - - # Apply a minimum normalization that protects us against uniform images. - stddev = math_ops.reduce_std(image, axis=[-1, -2, -3], keepdims=True) - min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, image.dtype)) - adjusted_stddev = math_ops.maximum(stddev, min_stddev) - - image -= image_mean - image = math_ops.divide(image, adjusted_stddev, name=scope) - return convert_image_dtype(image, orig_dtype, saturate=True) - else: - raise ValueError('std and mean must both be None or not None') diff --git a/tensorlayer/layers/convolution/__init__.py b/tensorlayer/layers/convolution/__init__.py index 668736e80..12aaa1485 100644 --- a/tensorlayer/layers/convolution/__init__.py +++ b/tensorlayer/layers/convolution/__init__.py @@ -9,16 +9,16 @@ More functions can be found in `TensorFlow API `__. """ -# from .binary_conv import * +from .binary_conv import * from .deformable_conv import * from .depthwise_conv import * -# from .dorefa_conv import * +from .dorefa_conv import * # from .expert_conv import * # from .expert_deconv import * -# from .group_conv import * +from .group_conv import * from .quan_conv import * from .quan_conv_bn import * -# from .separable_conv import * +from .separable_conv import * from .simplified_conv import * # from .simplified_deconv import * from .super_resolution import * @@ -52,7 +52,7 @@ # 'AtrousDeConv2d', # binary - # 'BinaryConv2d', + 'BinaryConv2d', # deformable 'DeformableConv2d', @@ -61,14 +61,14 @@ 'DepthwiseConv2d', # dorefa - # 'DorefaConv2d', + 'DorefaConv2d', # group - # 'GroupConv2d', + 'GroupConv2d', # separable - # 'SeparableConv1d', - # 'SeparableConv2d', + 'SeparableConv1d', + 'SeparableConv2d', # subpixel 'SubpixelConv1d', diff --git a/tensorlayer/layers/convolution/binary_conv.py b/tensorlayer/layers/convolution/binary_conv.py new file mode 100644 index 000000000..e5ab6c5a4 --- /dev/null +++ b/tensorlayer/layers/convolution/binary_conv.py @@ -0,0 +1,155 @@ +#! 
/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import tensorlayer as tl
+from tensorlayer import logging
+from tensorlayer.layers.core import Module
+from tensorlayer.backend import BACKEND
+
+__all__ = [
+    'BinaryConv2d',
+]
+
+
+class BinaryConv2d(Module):
+    """
+    The :class:`BinaryConv2d` class is a 2D binary CNN layer, whose weights are either -1 or 1 during inference.
+
+    Note that, the bias vector would not be binarized.
+
+    Parameters
+    ----------
+    n_filter : int
+        The number of filters.
+    filter_size : tuple of int
+        The filter size (height, width).
+    strides : tuple of int
+        The sliding window strides of corresponding input dimensions.
+        It must be in the same order as the ``shape`` parameter.
+    act : activation function
+        The activation function of this layer.
+    padding : str
+        The padding algorithm type: "SAME" or "VALID".
+    data_format : str
+        "channels_last" (NHWC, default) or "channels_first" (NCHW).
+    dilation_rate : tuple of int
+        Specifying the dilation rate to use for dilated convolution.
+    W_init : initializer
+        The initializer for the weight matrix.
+    b_init : initializer or None
+        The initializer for the bias vector. If None, skip biases.
+    in_channels : int
+        The number of in channels.
+    name : None or str
+        A unique layer name.
+
+    Examples
+    ---------
+    With TensorLayer
+
+    >>> net = tl.layers.Input([8, 100, 100, 32], name='input')
+    >>> binaryconv2d = tl.layers.BinaryConv2d(
+    ...     n_filter=64, filter_size=(3, 3), strides=(2, 2), act=tl.relu, in_channels=32, name='binaryconv2d'
+    ... )(net)
+    >>> print(binaryconv2d)
+    >>> output shape : (8, 50, 50, 64)
+
+    """
+
+    def __init__(
+        self, n_filter=32, filter_size=(3, 3), strides=(1, 1), act=None, padding='VALID', data_format="channels_last",
+        dilation_rate=(1, 1), W_init=tl.initializers.truncated_normal(stddev=0.02),
+        b_init=tl.initializers.constant(value=0.0), in_channels=None, name=None
+    ):
+        super(BinaryConv2d, self).__init__(name, act=act)
+        self.n_filter = n_filter
+        self.filter_size = filter_size
+        self._strides = self.strides = strides
+        self.padding = padding
+        self.data_format = data_format
+        self._dilation_rate = self.dilation_rate = dilation_rate
+        self.W_init = W_init
+        self.b_init = b_init
+        self.in_channels = in_channels
+
+        if self.in_channels:
+            self.build(None)
+            self._built = True
+
+        logging.info(
+            "BinaryConv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % (
+                self.name, n_filter, str(filter_size), str(strides), padding,
+                self.act.__class__.__name__ if self.act is not None else 'No Activation'
+            )
+        )
+
+    def __repr__(self):
+        actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
+        s = (
+            '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}'
+            ', strides={strides}, padding={padding}'
+        )
+        if self.dilation_rate != (1, ) * len(self.dilation_rate):
+            s += ', dilation={dilation_rate}'
+        if self.b_init is None:
+            s += ', bias=False'
+        s += (', ' + actstr)
+        if self.name is not None:
+            s += ', name=\'{name}\''
+        s += ')'
+        return s.format(classname=self.__class__.__name__, **self.__dict__)
+
+    def build(self, inputs_shape):
+        if self.data_format == 'channels_last':
+            self.data_format = 'NHWC'
+            if self.in_channels is None:
+                self.in_channels = inputs_shape[-1]
+            self._strides = [1, self._strides[0], self._strides[1], 1]
+            self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], 1]
+        elif self.data_format == 'channels_first':
+            self.data_format = 'NCHW'
+            if self.in_channels is None:
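+                # channels_first (NCHW): infer in_channels from axis 1; strides and dilations apply to axes (2, 3)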
+                self.in_channels = inputs_shape[1]
+            self._strides = [1, 1, self._strides[0], self._strides[1]]
+            self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1]]
+        else:
+            raise Exception("data_format should be either channels_last or channels_first")
+
+        self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, self.n_filter)
+
+        self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init)
+
+        self.b_init_flag = False
+        if self.b_init:
+            self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init)
+            self.bias_add = tl.ops.BiasAdd(self.data_format)
+            self.b_init_flag = True
+
+        self.act_init_flag = False
+        if self.act:
+            self.act_init_flag = True
+
+        self.binaryconv2d = tl.ops.BinaryConv2D(
+            strides=self._strides,
+            padding=self.padding,
+            data_format=self.data_format,
+            dilations=self._dilation_rate,
+            out_channel=self.n_filter,
+            k_size=self.filter_size,
+            in_channel=self.in_channels,
+        )
+
+    def forward(self, inputs):
+        if self._forward_state == False:
+            if self._built == False:
+                self.build(tl.get_tensor_shape(inputs))
+                self._built = True
+            self._forward_state = True
+
+        outputs = self.binaryconv2d(inputs, self.W)
+
+        if self.b_init_flag:
+            outputs = self.bias_add(outputs, self.b)
+        if self.act_init_flag:
+            outputs = self.act(outputs)
+        return outputs
diff --git a/tensorlayer/layers/convolution/dorefa_conv.py b/tensorlayer/layers/convolution/dorefa_conv.py
new file mode 100644
index 000000000..50396cd7e
--- /dev/null
+++ b/tensorlayer/layers/convolution/dorefa_conv.py
@@ -0,0 +1,168 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import tensorlayer as tl
+from tensorlayer import logging
+from tensorlayer.layers.core import Module
+
+__all__ = [
+    'DorefaConv2d',
+]
+
+
+class DorefaConv2d(Module):
+    """The :class:`DorefaConv2d` class is a 2D quantized convolutional layer. During inference its weights are
+    quantized to 'bitW' bits and the output of the previous layer is quantized to 'bitA' bits.
+
+    Note that, the bias vector would not be binarized.
+
+    Parameters
+    ----------
+    bitW : int
+        The number of bits used to quantize this layer's weights.
+    bitA : int
+        The number of bits used to quantize the output of the previous layer.
+    n_filter : int
+        The number of filters.
+    filter_size : tuple of int
+        The filter size (height, width).
+    strides : tuple of int
+        The sliding window strides of corresponding input dimensions.
+        It must be in the same order as the ``shape`` parameter.
+    act : activation function
+        The activation function of this layer.
+    padding : str
+        The padding algorithm type: "SAME" or "VALID".
+    data_format : str
+        "channels_last" (NHWC, default) or "channels_first" (NCHW).
+    dilation_rate : tuple of int
+        Specifying the dilation rate to use for dilated convolution.
+    W_init : initializer
+        The initializer for the weight matrix.
+    b_init : initializer or None
+        The initializer for the bias vector. If None, skip biases.
+    in_channels : int
+        The number of in channels.
+    name : None or str
+        A unique layer name.
+
+    Examples
+    ---------
+    With TensorLayer
+
+    >>> net = tl.layers.Input([8, 12, 12, 32], name='input')
+    >>> dorefaconv2d = tl.layers.DorefaConv2d(
+    ...     n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tl.relu, padding='SAME', name='dorefaconv2d'
+    ...
)(net) + >>> print(dorefaconv2d) + >>> output shape : (8, 12, 12, 32) + + """ + + def __init__( + self, + bitW=1, + bitA=3, + n_filter=32, + filter_size=(3, 3), + strides=(1, 1), + act=None, + padding='SAME', + data_format="channels_last", + dilation_rate=(1, 1), + W_init=tl.initializers.truncated_normal(stddev=0.02), + b_init=tl.initializers.constant(value=0.0), + in_channels=None, + name=None # 'dorefa_cnn2d', + ): + super().__init__(name, act=act) + self.bitW = bitW + self.bitA = bitA + self.n_filter = n_filter + self.filter_size = filter_size + self.strides = self._strides = strides + self.padding = padding + self.data_format = data_format + self.dilation_rate = self._dilation_rate = dilation_rate + self.W_init = W_init + self.b_init = b_init + self.in_channels = in_channels + + if self.in_channels: + self.build(None) + self._built = True + + logging.info( + "DorefaConv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % ( + self.name, n_filter, str(filter_size), str(strides), padding, + self.act.__class__.__name__ if self.act is not None else 'No Activation' + ) + ) + + def __repr__(self): + actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' + s = ( + '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' + ', strides={strides}, padding={padding}' + ) + if self.dilation_rate != (1, ) * len(self.dilation_rate): + s += ', dilation={dilation_rate}' + if self.b_init is None: + s += ', bias=False' + s += (', ' + actstr) + if self.name is not None: + s += ', name=\'{name}\'' + s += ')' + return s.format(classname=self.__class__.__name__, **self.__dict__) + + def build(self, inputs_shape): + if self.data_format == 'channels_last': + self.data_format = 'NHWC' + if self.in_channels is None: + self.in_channels = inputs_shape[-1] + self._strides = [1, self._strides[0], self._strides[1], 1] + self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], 1] + elif self.data_format == 'channels_first': + self.data_format = 'NCHW' + if self.in_channels is None: + self.in_channels = inputs_shape[1] + self._strides = [1, 1, self._strides[0], self._strides[1]] + self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1]] + else: + raise Exception("data_format should be either channels_last or channels_first") + + self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, self.n_filter) + + self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) + + self.b_init_flag = False + if self.b_init: + self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) + self.bias_add = tl.ops.BiasAdd(self.data_format) + self.b_init_flag = True + + self.act_init_flag = False + if self.act: + self.act_init_flag = True + + self.dorefaconv2d = tl.ops.DorefaConv2D( + bitW=self.bitW, bitA=self.bitA, strides=self._strides, padding=self.padding, data_format=self.data_format, + dilations=self._dilation_rate, out_channel=self.n_filter, k_size=self.filter_size, + in_channel=self.in_channels + ) + + def forward(self, inputs): + + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + + outputs = self.dorefaconv2d(inputs, self.W) + + if self.b_init_flag: + outputs = self.bias_add(outputs, self.b) + if self.act_init_flag: + outputs = self.act(outputs) + return outputs diff --git a/tensorlayer/layers/convolution/group_conv.py 
b/tensorlayer/layers/convolution/group_conv.py new file mode 100644 index 000000000..cbbbd473f --- /dev/null +++ b/tensorlayer/layers/convolution/group_conv.py @@ -0,0 +1,164 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import tensorlayer as tl +from tensorlayer import logging +from tensorlayer.layers.core import Module +from tensorlayer.backend import BACKEND + +__all__ = [ + 'GroupConv2d', +] + + +class GroupConv2d(Module): + """The :class:`GroupConv2d` class is a 2D grouped convolution layer, see `here `__. + Parameters + -------------- + n_filter : int + The number of filters. + filter_size : tuple of int + The filter size. + strides : tuple of int + The stride step. + n_group : int + The number of groups. + act : activation function + The activation function of this layer. + padding : str + The padding algorithm type: "SAME" or "VALID". + data_format : str + "channels_last" (NHWC, default) or "channels_first" (NCHW). + dilation_rate : tuple of int + Specifying the dilation rate to use for dilated convolution. + W_init : initializer + The initializer for the weight matrix. + b_init : initializer or None + The initializer for the bias vector. If None, skip biases. + in_channels : int + The number of in channels. + name : None or str + A unique layer name. + Examples + --------- + With TensorLayer + >>> net = tl.layers.Input([8, 24, 24, 32], name='input') + >>> groupconv2d = tl.layers.GroupConv2d( + ... n_filter=64, filter_size=(3, 3), strides=(2, 2), n_group=2, name='group' + ... )(net) + >>> print(groupconv2d) + >>> output shape : (8, 12, 12, 64) + """ + + def __init__( + self, n_filter=32, filter_size=(1, 1), strides=(1, 1), n_group=1, act=None, padding='SAME', + data_format="channels_last", dilation_rate=(1, 1), W_init=tl.initializers.truncated_normal(stddev=0.02), + b_init=tl.initializers.constant(value=0.0), in_channels=None, name=None + ): + super().__init__(name, act=act) + self.n_filter = n_filter + self.filter_size = filter_size + self._strides = self.strides = strides + self.n_group = n_group + self.padding = padding + self.data_format = data_format + self._dilation_rate = self.dilation_rate = dilation_rate + self.W_init = W_init + self.b_init = b_init + self.in_channels = in_channels + + if self.in_channels: + self.build(None) + self._built = True + + logging.info( + "GroupConv2d %s: n_filter: %d filter_size: %s strides: %s n_group: %d pad: %s act: %s" % ( + self.name, n_filter, str(filter_size), str(strides), n_group, padding, + self.act.__class__.__name__ if self.act is not None else 'No Activation' + ) + ) + + def __repr__(self): + actstr = self.act.__class__.__name__ if self.act is not None else "No Activation" + s = ( + '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' + ', strides={strides}, n_group={n_group}, padding={padding}' + ) + if self.dilation_rate != (1, ) * len(self.dilation_rate): + s += ', dilation={dilation_rate}' + if self.b_init is None: + s += ', bias=False' + s += (', ' + actstr) + if self.name is not None: + s += ', name=\'{name}\'' + s += ')' + return s.format(classname=self.__class__.__name__, **self.__dict__) + + def build(self, inputs_shape): + if self.data_format == 'channels_last': + self.data_format = 'NHWC' + if self.in_channels is None: + self.in_channels = inputs_shape[-1] + self._strides = [1, self._strides[0], self._strides[1], 1] + self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], 1] + elif self.data_format == 'channels_first': + self.data_format = 'NCHW' + if self.in_channels is
None: + self.in_channels = inputs_shape[1] + self._strides = [1, 1, self._strides[0], self._strides[1]] + self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1]] + else: + raise Exception("data_format should be either channels_last or channels_first") + + if self.n_group < 1: + raise ValueError( + "n_group must be an integer greater than or equal to 1, but we got: {}".format(self.n_group) + ) + + if self.in_channels % self.n_group != 0: + raise ValueError( + "The number of input channels must be divisible by n_group, but we got: the number of input channels " + "is {}, the n_group is {}.".format(self.in_channels, self.n_group) + ) + + if self.n_filter % self.n_group != 0: + raise ValueError( + "The number of filters must be divisible by n_group, but we got: the number of filters " + "is {}, the n_group is {}.".format(self.n_filter, self.n_group) + ) + + # TODO channels first filter shape [out_channel, in_channel/n_group, filter_h, filter_w] + self.filter_shape = ( + self.filter_size[0], self.filter_size[1], int(self.in_channels / self.n_group), self.n_filter + ) + + self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) + + self.b_init_flag = False + if self.b_init: + self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) + self.bias_add = tl.ops.BiasAdd(self.data_format) + self.b_init_flag = True + + self.group_conv2d = tl.ops.GroupConv2D( + strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate, + out_channel=self.n_filter, k_size=(self.filter_size[0], self.filter_size[1]), groups=self.n_group + ) + + self.act_init_flag = False + if self.act: + self.act_init_flag = True + + def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + + outputs = self.group_conv2d(inputs, self.W) + if self.b_init_flag: + outputs = self.bias_add(outputs, self.b) + if self.act_init_flag: + outputs = self.act(outputs) + return outputs diff --git a/tensorlayer/layers/convolution/separable_conv.py b/tensorlayer/layers/convolution/separable_conv.py new file mode 100644 index 000000000..b837e4ed7 --- /dev/null +++ b/tensorlayer/layers/convolution/separable_conv.py @@ -0,0 +1,319 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import tensorlayer as tl +from tensorlayer import logging +from tensorlayer.layers.core import Module +from tensorlayer.backend import BACKEND + +__all__ = [ + 'SeparableConv1d', + 'SeparableConv2d', +] + + +class SeparableConv1d(Module): + """The :class:`SeparableConv1d` class is a 1D depthwise separable convolutional layer. + This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. + Parameters + ------------ + n_filter : int + The dimensionality of the output space (i.e. the number of filters in the convolution). + filter_size : int + Specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. + stride : int + Specifying the stride of the convolution. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1. + act : activation function + The activation function of this layer. + padding : str + One of "valid" or "same" (case-insensitive).
+ data_format : str + One of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch, width, channels) while channels_first corresponds to inputs with shape (batch, channels, width). + dilation_rate : int + Specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any dilation_rate value != 1 is incompatible with specifying any stride value != 1. + depth_multiplier : int + The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to num_filters_in * depth_multiplier. + depthwise_init : initializer + The initializer for the depthwise convolution kernel. + pointwise_init : initializer + The initializer for the pointwise convolution kernel. + b_init : initializer + The initializer for the bias vector, which is added after the pointwise convolution. If None, skip biases. + in_channels : int + The number of in channels. + name : None or str + A unique layer name. + Examples + -------- + With TensorLayer + >>> net = tl.layers.Input([8, 50, 64], name='input') + >>> separableconv1d = tl.layers.SeparableConv1d(n_filter=32, filter_size=3, stride=2, padding='SAME', act=tl.relu, name='separable_1d')(net) + >>> print(separableconv1d) + >>> output shape : (8, 25, 32) + """ + + def __init__( + self, n_filter=32, filter_size=1, stride=1, act=None, padding="SAME", data_format="channels_last", + dilation_rate=1, depth_multiplier=1, depthwise_init=tl.initializers.truncated_normal(stddev=0.02), + pointwise_init=tl.initializers.truncated_normal(stddev=0.02), b_init=tl.initializers.constant(value=0.0), + in_channels=None, name=None + ): + super(SeparableConv1d, self).__init__(name, act=act) + self.n_filter = n_filter + self.filter_size = filter_size + self.stride = stride + self.padding = padding + self.data_format = data_format + self.dilation_rate = dilation_rate + self.depth_multiplier = depth_multiplier + self.depthwise_init = depthwise_init + self.pointwise_init = pointwise_init + self.b_init = b_init + self.in_channels = in_channels + + if self.in_channels: + self.build(None) + self._built = True + + logging.info( + "SeparableConv1d %s: n_filter: %d filter_size: %s stride: %s depth_multiplier: %d act: %s" % ( + self.name, n_filter, str(filter_size), str(stride), depth_multiplier, + self.act.__class__.__name__ if self.act is not None else 'No Activation' + ) + ) + + def __repr__(self): + actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' + s = ( + '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' + ', stride={stride}, padding={padding}' + ) + if self.dilation_rate != 1: + s += ', dilation={dilation_rate}' + if self.b_init is None: + s += ', bias=False' + s += (', ' + actstr) + if self.name is not None: + s += ', name=\'{name}\'' + s += ')' + return s.format(classname=self.__class__.__name__, **self.__dict__) + + def build(self, inputs_shape): + if self.data_format == 'channels_last': + self.data_format = 'NWC' + if self.in_channels is None: + self.in_channels = inputs_shape[-1] + elif self.data_format == 'channels_first': + self.data_format = 'NCW' + if self.in_channels is None: + self.in_channels = inputs_shape[1] + else: + raise Exception("data_format should be either channels_last or channels_first") + + if BACKEND == 'tensorflow': + self.depthwise_filter_shape = (self.filter_size, self.in_channels,
self.depth_multiplier) + self.pointwise_filter_shape = (1, self.depth_multiplier * self.in_channels, self.n_filter) + elif BACKEND == 'mindspore': + self.depthwise_filter_shape = (self.filter_size, 1, self.depth_multiplier * self.in_channels) + self.pointwise_filter_shape = (1, self.depth_multiplier * self.in_channels, self.n_filter) + + self.depthwise_W = self._get_weights( + 'depthwise_filters', shape=self.depthwise_filter_shape, init=self.depthwise_init + ) + self.pointwise_W = self._get_weights( + 'pointwise_filters', shape=self.pointwise_filter_shape, init=self.pointwise_init + ) + + self.b_init_flag = False + if self.b_init: + self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) + self.bias_add = tl.ops.BiasAdd(self.data_format) + self.b_init_flag = True + + self.act_init_flag = False + if self.act: + self.activate = self.act + self.act_init_flag = True + + self.separable_conv1d = tl.ops.SeparableConv1D( + stride=self.stride, padding=self.padding, data_format=self.data_format, dilations=self.dilation_rate, + out_channel=self.n_filter, k_size=self.filter_size, in_channel=self.in_channels, + depth_multiplier=self.depth_multiplier + ) + + def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + + outputs = self.separable_conv1d(inputs, self.depthwise_W, self.pointwise_W) + if self.b_init_flag: + outputs = self.bias_add(outputs, self.b) + if self.act_init_flag: + outputs = self.act(outputs) + return outputs + + +class SeparableConv2d(Module): + """The :class:`SeparableConv2d` class is a 2D depthwise separable convolutional layer. + This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. + Parameters + ------------ + n_filter : int + The dimensionality of the output space (i.e. the number of filters in the convolution). + filter_size : tuple of int + Specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. + strides : tuple of int + Specifying the stride of the convolution. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1. + act : activation function + The activation function of this layer. + padding : str + One of "valid" or "same" (case-insensitive). + data_format : str + One of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch, height, width, channels) while channels_first corresponds to inputs with shape (batch, channels, height, width). + dilation_rate : tuple of int + Specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any dilation_rate value != 1 is incompatible with specifying any stride value != 1. + depth_multiplier : int + The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to num_filters_in * depth_multiplier. + depthwise_init : initializer + for the depthwise convolution kernel. + pointwise_init : initializer + For the pointwise convolution kernel. + b_init : initializer + For the bias vector. If None, ignore bias in the pointwise part only. 
+ in_channels : int + The number of in channels. + name : None or str + A unique layer name. + Examples + -------- + With TensorLayer + >>> net = tl.layers.Input([8, 50, 50, 64], name='input') + >>> separableconv2d = tl.layers.SeparableConv2d(n_filter=32, filter_size=3, strides=2, depth_multiplier=3, padding='VALID', act=tl.relu, name='separable_2d')(net) + >>> print(separableconv2d) + >>> output shape : (8, 24, 24, 32) + """ + + def __init__( + self, n_filter=32, filter_size=(1, 1), strides=(1, 1), act=None, padding="VALID", data_format="channels_last", + dilation_rate=(1, 1), depth_multiplier=1, depthwise_init=tl.initializers.truncated_normal(stddev=0.02), + pointwise_init=tl.initializers.truncated_normal(stddev=0.02), b_init=tl.initializers.constant(value=0.0), + in_channels=None, name=None + ): + super(SeparableConv2d, self).__init__(name, act=act) + self.n_filter = n_filter + self.filter_size = filter_size + self._strides = self.strides = strides + self.padding = padding + self.data_format = data_format + self._dilation_rate = self.dilation_rate = dilation_rate + self.depth_multiplier = depth_multiplier + self.depthwise_init = depthwise_init + self.pointwise_init = pointwise_init + self.b_init = b_init + self.in_channels = in_channels + + if self.in_channels: + self.build(None) + self._built = True + + logging.info( + "SeparableConv2d %s: n_filter: %d filter_size: %s strides: %s depth_multiplier: %d act: %s" % ( + self.name, n_filter, str(filter_size), str(strides), depth_multiplier, + self.act.__class__.__name__ if self.act is not None else 'No Activation' + ) + ) + + def __repr__(self): + actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' + s = ( + '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' + ', strides={strides}, padding={padding}' + ) + if self.dilation_rate != (1, ) * len(self.dilation_rate): + s += ', dilation={dilation_rate}' + if self.b_init is None: + s += ', bias=False' + s += (', ' + actstr) + if self.name is not None: + s += ', name=\'{name}\'' + s += ')' + return s.format(classname=self.__class__.__name__, **self.__dict__) + + def build(self, inputs_shape): + if self.data_format == 'channels_last': + self.data_format = 'NHWC' + if self.in_channels is None: + self.in_channels = inputs_shape[-1] + self._strides = [1, self._strides[0], self._strides[1], 1] + self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], 1] + elif self.data_format == 'channels_first': + self.data_format = 'NCHW' + if self.in_channels is None: + self.in_channels = inputs_shape[1] + self._strides = [1, 1, self._strides[0], self._strides[1]] + self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1]] + else: + raise Exception("data_format should be either channels_last or channels_first") + + if BACKEND == 'tensorflow': + self.depthwise_filter_shape = ( + self.filter_size[0], self.filter_size[1], self.in_channels, self.depth_multiplier + ) + self.pointwise_filter_shape = (1, 1, self.depth_multiplier * self.in_channels, self.n_filter) + + elif BACKEND == 'mindspore': + self.depthwise_filter_shape = ( + self.filter_size[0], self.filter_size[1], 1, self.depth_multiplier * self.in_channels + ) + self.pointwise_filter_shape = (1, 1, self.depth_multiplier * self.in_channels, self.n_filter) + + self.depthwise_W = self._get_weights( + 'depthwise_filters', shape=self.depthwise_filter_shape, init=self.depthwise_init + ) + + self.pointwise_W = self._get_weights( + 'pointwise_filters',
shape=self.pointwise_filter_shape, init=self.pointwise_init + ) + + self.b_init_flag = False + if self.b_init: + self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) + self.bias_add = tl.ops.BiasAdd(self.data_format) + self.b_init_flag = True + + self.act_init_flag = False + if self.act: + self.act_init_flag = True + + self.separable_conv2d = tl.ops.SeparableConv2D( + strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate, + out_channel=self.n_filter, k_size=self.filter_size, in_channel=self.in_channels, + depth_multiplier=self.depth_multiplier + ) + + def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + + outputs = self.separable_conv2d(inputs, self.depthwise_W, self.pointwise_W) + if self.b_init_flag: + outputs = self.bias_add(outputs, self.b) + if self.act_init_flag: + outputs = self.act(outputs) + return outputs + + +if __name__ == '__main__': + net = tl.layers.Input([5, 400, 400, 3], name='input') + layer = SeparableConv2d( + in_channels=3, filter_size=(3, 3), strides=(2, 2), dilation_rate=(2, 2), act=tl.ReLU, depth_multiplier=3, + name='separableconv2d1' + ) + print(len(layer.all_weights)) + print(layer(net).shape) diff --git a/tensorlayer/layers/pooling.py b/tensorlayer/layers/pooling.py index 51cc9a7ce..006b34deb 100644 --- a/tensorlayer/layers/pooling.py +++ b/tensorlayer/layers/pooling.py @@ -5,7 +5,6 @@ from tensorlayer import logging from tensorlayer.layers.core import Module - __all__ = [ 'PoolLayer', 'MaxPool1d', @@ -20,6 +19,12 @@ 'GlobalMeanPool2d', 'GlobalMaxPool3d', 'GlobalMeanPool3d', + 'AdaptiveMeanPool1d', + 'AdaptiveMeanPool2d', + 'AdaptiveMeanPool3d', + 'AdaptiveMaxPool1d', + 'AdaptiveMaxPool2d', + 'AdaptiveMaxPool3d', 'CornerPool2d', ] @@ -923,9 +928,9 @@ class CornerPool2d(Module): """ def __init__( - self, - mode='TopLeft', - name=None # 'cornerpool2d' + self, + mode='TopLeft', + name=None # 'cornerpool2d' ): super().__init__(name) self.mode = mode @@ -958,7 +963,7 @@ def forward(self, inputs): ) temp_bottom = tl.ops.max_pool(temp_bottom, ksize=(input_height, 1), strides=(1, 1), padding='VALID') temp_right = tl.ops.max_pool(temp_right, ksize=(1, input_width), strides=(1, 1), padding='VALID') - outputs = tl.add(temp_bottom, temp_right)#, name=self.name) + outputs = tl.add(temp_bottom, temp_right) #, name=self.name) elif self.mode == 'BottomRight': temp_top = tl.pad( inputs, tl.constant([[0, 0], [input_height - 1, 0], [0, 0], [0, 0]]), constant_values=batch_min @@ -973,7 +978,343 @@ def forward(self, inputs): outputs = tl.identity(inputs) return outputs -if __name__ == '__main__': - net = tl.layers.Input([None, 32, 32, 8], name='input') - net = CornerPool2d(mode='TopLeft',name='cornerpool2d')(net) - print(net) \ No newline at end of file + +class AdaptiveMeanPool1d(Module): + """The :class:`AdaptiveMeanPool1d` class is a 1D Adaptive Mean Pooling layer. + + Parameters + ------------ + output_size : int + The target output size. It must be an integer. + data_format : str + One of channels_last (default, [batch, width, channel]) or channels_first. The ordering of the dimensions in the inputs. + name : None or str + A unique layer name. 
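+ + Notes + ----- + Adaptive pooling computes the pooling window for each output position from the input and output sizes instead of using a fixed kernel; a common formulation (assumed here, since the exact rule is delegated to the backend op) is start_i = floor(i * L_in / L_out) and end_i = ceil((i + 1) * L_in / L_out). For example, pooling a length-32 input to output_size=16 averages over windows of exactly two elements.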
+ + Examples + --------- + With TensorLayer + + >>> net = tl.layers.Input([None, 32, 3], name='input') + >>> net = tl.layers.AdaptiveMeanPool1d(output_size=16)(net) + >>> output shape : [None, 16, 3] + + """ + + def __init__(self, output_size, data_format='channels_last', name=None): + super(AdaptiveMeanPool1d, self).__init__(name) + self.output_size = output_size + self.data_format = data_format + + self.build() + self._built = True + + logging.info("AdaptiveMeanPool1d %s: output_size: %s " % (self.name, str(output_size))) + + def __repr__(self): + s = ('{classname}(output_size={output_size}') + if self.name is not None: + s += ', name=\'{name}\'' + s += ')' + return s.format(classname=self.__class__.__name__, **self.__dict__) + + def build(self, inputs_shape=None): + if self.data_format == 'channels_last': + self.data_format = 'NWC' + elif self.data_format == 'channels_first': + self.data_format = 'NCW' + else: + raise Exception("unsupported data format") + + self.adaptivemeanpool1d = tl.ops.AdaptiveMeanPool1D(output_size=self.output_size, data_format=self.data_format) + + def forward(self, inputs): + + outputs = self.adaptivemeanpool1d(inputs) + return outputs + + +class AdaptiveMeanPool2d(Module): + """The :class:`AdaptiveMeanPool2d` class is a 2D Adaptive Mean Pooling layer. + + Parameters + ------------ + output_size : int or list or tuple + The target output size. It could be an int, a list [int, int] or a tuple (int, int). + data_format : str + One of channels_last (default, [batch, height, width, channel]) or channels_first. The ordering of the dimensions in the inputs. + name : None or str + A unique layer name. + + Examples + --------- + With TensorLayer + + >>> net = tl.layers.Input([None, 32, 32, 3], name='input') + >>> net = tl.layers.AdaptiveMeanPool2d(output_size=16)(net) + >>> output shape : [None, 16, 16, 3] + + """ + + def __init__(self, output_size, data_format='channels_last', name=None): + super(AdaptiveMeanPool2d, self).__init__(name) + self.output_size = output_size + self.data_format = data_format + + self.build() + self._built = True + + logging.info("AdaptiveMeanPool2d %s: output_size: %s " % (self.name, str(output_size))) + + def __repr__(self): + s = ('{classname}(output_size={output_size}') + if self.name is not None: + s += ', name=\'{name}\'' + s += ')' + return s.format(classname=self.__class__.__name__, **self.__dict__) + + def build(self, inputs_shape=None): + if self.data_format == 'channels_last': + self.data_format = 'NHWC' + elif self.data_format == 'channels_first': + self.data_format = 'NCHW' + else: + raise Exception("unsupported data format") + + if isinstance(self.output_size, int): + self.output_size = (self.output_size, ) * 2 + + self.adaptivemeanpool2d = tl.ops.AdaptiveMeanPool2D(output_size=self.output_size, data_format=self.data_format) + + def forward(self, inputs): + + outputs = self.adaptivemeanpool2d(inputs) + return outputs + + +class AdaptiveMeanPool3d(Module): + """The :class:`AdaptiveMeanPool3d` class is a 3D Adaptive Mean Pooling layer. + + Parameters + ------------ + output_size : int or list or tuple + The target output size. It could be an int, a list [int, int, int] or a tuple (int, int, int). + data_format : str + One of channels_last (default, [batch, depth, height, width, channel]) or channels_first. The ordering of the dimensions in the inputs. + name : None or str + A unique layer name.
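+ + Notes + ----- + As in the 2D case, an int ``output_size`` is expanded to one value per spatial dimension, i.e. ``output_size=16`` is treated as ``(16, 16, 16)``; pass a list or tuple to pool each spatial dimension to a different size.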
+ + Examples + --------- + With TensorLayer + + >>> net = tl.layers.Input([None, 32, 32, 32, 3], name='input') + >>> net = tl.layers.AdaptiveMeanPool3d(output_size=16)(net) + >>> output shape : [None, 16, 16, 16, 3] + + """ + + def __init__(self, output_size, data_format='channels_last', name=None): + super(AdaptiveMeanPool3d, self).__init__(name) + self.output_size = output_size + self.data_format = data_format + + self.build() + self._built = True + + logging.info("AdaptiveMeanPool3d %s: output_size: %s " % (self.name, str(output_size))) + + def __repr__(self): + s = ('{classname}(output_size={output_size}') + if self.name is not None: + s += ', name=\'{name}\'' + s += ')' + return s.format(classname=self.__class__.__name__, **self.__dict__) + + def build(self, inputs_shape=None): + if self.data_format == 'channels_last': + self.data_format = 'NDHWC' + elif self.data_format == 'channels_first': + self.data_format = 'NCDHW' + else: + raise Exception("unsupported data format") + + if isinstance(self.output_size, int): + self.output_size = (self.output_size, ) * 3 + + self.adaptivemeanpool3d = tl.ops.AdaptiveMeanPool3D(output_size=self.output_size, data_format=self.data_format) + + def forward(self, inputs): + + outputs = self.adaptivemeanpool3d(inputs) + return outputs + + +class AdaptiveMaxPool1d(Module): + """The :class:`AdaptiveMaxPool1d` class is a 1D Adaptive Max Pooling layer. + + Parameters + ------------ + output_size : int + The target output size. It must be an integer. + data_format : str + One of channels_last (default, [batch, width, channel]) or channels_first. The ordering of the dimensions in the inputs. + name : None or str + A unique layer name. + + Examples + --------- + With TensorLayer + + >>> net = tl.layers.Input([None, 32, 3], name='input') + >>> net = tl.layers.AdaptiveMaxPool1d(output_size=16)(net) + >>> output shape : [None, 16, 3] + + """ + + def __init__(self, output_size, data_format='channels_last', name=None): + super(AdaptiveMaxPool1d, self).__init__(name) + self.output_size = output_size + self.data_format = data_format + + self.build() + self._built = True + + logging.info("AdaptiveMaxPool1d %s: output_size: %s " % (self.name, str(output_size))) + + def __repr__(self): + s = ('{classname}(output_size={output_size}') + if self.name is not None: + s += ', name=\'{name}\'' + s += ')' + return s.format(classname=self.__class__.__name__, **self.__dict__) + + def build(self, inputs_shape=None): + if self.data_format == 'channels_last': + self.data_format = 'NWC' + elif self.data_format == 'channels_first': + self.data_format = 'NCW' + else: + raise Exception("unsupported data format") + + self.adaptivemaxpool1d = tl.ops.AdaptiveMaxPool1D(output_size=self.output_size, data_format=self.data_format) + + def forward(self, inputs): + + outputs = self.adaptivemaxpool1d(inputs) + return outputs + + +class AdaptiveMaxPool2d(Module): + """The :class:`AdaptiveMaxPool2d` class is a 2D Adaptive Max Pooling layer. + + Parameters + ------------ + output_size : int or list or tuple + The target output size. It could be an int, a list [int, int] or a tuple (int, int). + data_format : str + One of channels_last (default, [batch, height, width, channel]) or channels_first. The ordering of the dimensions in the inputs. + name : None or str + A unique layer name.
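+ + Notes + ----- + With ``output_size=1`` each feature map is reduced to a single value, which matches a global max pooling (compare :class:`GlobalMaxPool2d`) except that the pooled spatial dimensions are kept in the output shape; this follows from the adaptive window rule sketched for :class:`AdaptiveMeanPool1d`, not from anything specific to this implementation.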
+ + Examples + --------- + With TensorLayer + + >>> net = tl.layers.Input([None, 32, 32, 3], name='input') + >>> net = tl.layers.AdaptiveMaxPool2d(output_size=16)(net) + >>> output shape : [None, 16, 16, 3] + + """ + + def __init__(self, output_size, data_format='channels_last', name=None): + super(AdaptiveMaxPool2d, self).__init__(name) + self.output_size = output_size + self.data_format = data_format + + self.build() + self._built = True + + logging.info("AdaptiveMaxPool2d %s: output_size: %s " % (self.name, str(output_size))) + + def __repr__(self): + s = ('{classname}(output_size={output_size}') + if self.name is not None: + s += ', name=\'{name}\'' + s += ')' + return s.format(classname=self.__class__.__name__, **self.__dict__) + + def build(self, inputs_shape=None): + if self.data_format == 'channels_last': + self.data_format = 'NHWC' + elif self.data_format == 'channels_first': + self.data_format = 'NCHW' + else: + raise Exception("unsupported data format") + if isinstance(self.output_size, int): + self.output_size = (self.output_size, ) * 2 + + self.adaptivemaxpool2d = tl.ops.AdaptiveMaxPool2D(output_size=self.output_size, data_format=self.data_format) + + def forward(self, inputs): + + outputs = self.adaptivemaxpool2d(inputs) + return outputs + + +class AdaptiveMaxPool3d(Module): + """The :class:`AdaptiveMaxPool3d` class is a 3D Adaptive Max Pooling layer. + + Parameters + ------------ + output_size : int or list or tuple + The target output size. It could be an int, a list [int, int, int] or a tuple (int, int, int). + data_format : str + One of channels_last (default, [batch, depth, height, width, channel]) or channels_first. The ordering of the dimensions in the inputs. + name : None or str + A unique layer name. + + Examples + --------- + With TensorLayer + + >>> net = tl.layers.Input([None, 32, 32, 32, 3], name='input') + >>> net = tl.layers.AdaptiveMaxPool3d(output_size=16)(net) + >>> output shape : [None, 16, 16, 16, 3] + + """ + + def __init__(self, output_size, data_format='channels_last', name=None): + super(AdaptiveMaxPool3d, self).__init__(name) + self.output_size = output_size + self.data_format = data_format + + self.build() + self._built = True + + logging.info("AdaptiveMaxPool3d %s: output_size: %s " % (self.name, str(output_size))) + + def __repr__(self): + s = ('{classname}(output_size={output_size}') + if self.name is not None: + s += ', name=\'{name}\'' + s += ')' + return s.format(classname=self.__class__.__name__, **self.__dict__) + + def build(self, inputs_shape=None): + if self.data_format == 'channels_last': + self.data_format = 'NDHWC' + elif self.data_format == 'channels_first': + self.data_format = 'NCDHW' + else: + raise Exception("unsupported data format") + + if isinstance(self.output_size, int): + self.output_size = (self.output_size, ) * 3 + + self.adaptivemaxpool3d = tl.ops.AdaptiveMaxPool3D(output_size=self.output_size, data_format=self.data_format) + + def forward(self, inputs): + + outputs = self.adaptivemaxpool3d(inputs) + return outputs diff --git a/tensorlayer/metric/__init__.py b/tensorlayer/metric/__init__.py new file mode 100644 index 000000000..c11f8323b --- /dev/null +++ b/tensorlayer/metric/__init__.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from tensorlayer.backend import BACKEND + +if BACKEND == 'tensorflow': + from .tensorflow_metric import * +elif BACKEND == 'mindspore': + from .mindspore_metric import * +elif BACKEND == 'dragon': + pass +elif BACKEND == 'paddle': + from .paddle_metric import * +else: + raise
NotImplementedError("This backend is not supported") diff --git a/tensorlayer/metric/mindspore_metric.py b/tensorlayer/metric/mindspore_metric.py new file mode 100644 index 000000000..bcc6499d0 --- /dev/null +++ b/tensorlayer/metric/mindspore_metric.py @@ -0,0 +1,88 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import mindspore.nn as nn +from mindspore.nn.metrics._evaluation import EvaluationBase +from mindspore.nn.metrics.metric import Metric +__all__ = [ + 'Accuracy', + 'Auc', + 'Precision', + 'Recall', +] + + +class Accuracy(object): + + def __init__(self, topk=1): + + self.accuracy = nn.TopKCategoricalAccuracy(k=topk) + + def update(self, y_pred, y_true): + + self.accuracy.update(y_pred, y_true) + + def result(self): + + return self.accuracy.eval() + + def reset(self): + + self.accuracy.clear() + + +class Auc(object): + + def __init__(self): + + pass + + def update(self, y_pred, y_true): + + raise Exception('Auc metric function not implemented') + + def result(self): + + pass + + def reset(self): + + pass + + +class Precision(object): + + def __init__(self): + + self.precision = nn.Precision(eval_type="classification") + + def update(self, y_pred, y_true): + + self.precision.update(y_pred, y_true) + + def result(self): + + return self.precision.eval() + + def reset(self): + + self.precision.clear() + + +class Recall(object): + + def __init__(self): + + self.recall = nn.Recall(eval_type="classification") + + def update(self, y_pred, y_true): + + self.recall.update(y_pred, y_true) + + def result(self): + + return self.recall.eval() + + def reset(self): + + self.recall.clear() diff --git a/tensorlayer/metric/paddle_metric.py b/tensorlayer/metric/paddle_metric.py new file mode 100644 index 000000000..b6b3f3257 --- /dev/null +++ b/tensorlayer/metric/paddle_metric.py @@ -0,0 +1,89 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import paddle +from paddle.metric.metrics import Metric + +__all__ = [ + 'Accuracy', + 'Auc', + 'Precision', + 'Recall', +] + + +class Accuracy(object): + + def __init__( + self, + topk=1, + ): + + self.topk = topk + self.accuracy = paddle.metric.Accuracy(topk=(self.topk, )) + + def update(self, y_pred, y_true): + + self.accuracy.update(self.accuracy.compute(y_pred, y_true)) + + def result(self): + + return self.accuracy.accumulate() + + def reset(self): + + self.accuracy.reset() + + +class Auc(object): + + def __init__(self, curve='ROC', num_thresholds=4095): + + self.auc = paddle.metric.Auc(curve=curve, num_thresholds=num_thresholds) + + def update(self, y_pred, y_true): + + self.auc.update(y_pred, y_true) + + def result(self): + + return self.auc.accumulate() + + def reset(self): + + self.auc.reset() + + +class Precision(object): + + def __init__(self): + + self.precision = paddle.metric.Precision() + + def update(self, y_pred, y_true): + + self.precision.update(y_pred, y_true) + + def result(self): + + return self.precision.accumulate() + + def reset(self): + + self.precision.reset() + + +class Recall(object): + + def __init__(self): + + self.recall = paddle.metric.Recall() + + def update(self, y_pred, y_true): + self.recall.update(y_pred, y_true) + + def result(self): + return self.recall.accumulate() + + def reset(self): + self.recall.reset() diff --git a/tensorlayer/metric/tensorflow_metric.py b/tensorlayer/metric/tensorflow_metric.py new file mode 100644 index 000000000..d7398ffcc --- /dev/null +++ b/tensorlayer/metric/tensorflow_metric.py @@ -0,0 +1,98 @@ +#! 
/usr/bin/python +# -*- coding: utf-8 -*- + +import tensorflow as tf +from tensorflow.keras.metrics import Metric + +__all__ = [ + 'Accuracy', + 'Auc', + 'Precision', + 'Recall', +] + + +class Accuracy(object): + + def __init__(self, topk=1): + self.topk = topk + if topk == 1: + self.accuracy = tf.keras.metrics.Accuracy() + else: + self.accuracy = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=topk) + + def update(self, y_pred, y_true): + + if self.topk == 1: + y_pred = tf.argmax(y_pred, axis=1) + self.accuracy.update_state(y_true, y_pred) + else: + self.accuracy.update_state(y_true, y_pred) + + def result(self): + + return self.accuracy.result() + + def reset(self): + + self.accuracy.reset_states() + + +class Auc(object): + + def __init__( + self, + curve='ROC', + num_thresholds=200, + ): + self.auc = tf.keras.metrics.AUC(num_thresholds=num_thresholds, curve=curve) + + def update(self, y_pred, y_true): + + self.auc.update_state(y_true, y_pred) + + def result(self): + + return self.auc.result() + + def reset(self): + + self.auc.reset_states() + + +class Precision(object): + + def __init__(self): + + self.precision = tf.keras.metrics.Precision() + + def update(self, y_pred, y_true): + + self.precision.update_state(y_true, y_pred) + + def result(self): + + return self.precision.result() + + def reset(self): + + self.precision.reset_states() + + +class Recall(object): + + def __init__(self): + + self.recall = tf.keras.metrics.Recall() + + def update(self, y_pred, y_true): + + self.recall.update_state(y_true, y_pred) + + def result(self): + + return self.recall.result() + + def reset(self): + + self.recall.reset_states() diff --git a/tensorlayer/models/core.py b/tensorlayer/models/core.py index 7d760087a..35e7d61d5 100644 --- a/tensorlayer/models/core.py +++ b/tensorlayer/models/core.py @@ -64,10 +64,7 @@ class Model: >>> model.train(2, dataset) """ - def __init__( - self, network, loss_fn=None, optimizer=None, metrics=None, - **kwargs - ): + def __init__(self, network, loss_fn=None, optimizer=None, metrics=None, **kwargs): self.network = network self.loss_fn = loss_fn self.optimizer = optimizer @@ -270,7 +267,9 @@ def tf_train( train_loss += _loss_ce if metrics: - train_acc += metrics(_logits, y_batch) + metrics.update(_logits, y_batch) + train_acc += metrics.result() + metrics.reset() else: train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 @@ -294,7 +293,9 @@ def tf_train( _logits = network(X_batch) # is_train=False, disable dropout val_loss += loss_fn(_logits, y_batch, name='eval_loss') if metrics: - val_acc += metrics(_logits, y_batch) + metrics.update(_logits, y_batch) + val_acc += metrics.result() + metrics.reset() else: val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 @@ -319,7 +320,9 @@ def ms_train( loss = loss_output.asnumpy() train_loss += loss if metrics: - train_acc += metrics(output, y_batch) + metrics.update(output, y_batch) + train_acc += metrics.result() + metrics.reset() else: train_acc += np.mean((P.Equal()(P.Argmax(axis=1)(output), y_batch).asnumpy())) n_iter += 1 @@ -343,14 +346,15 @@ def ms_train( _logits = network(X_batch) val_loss += loss_fn(_logits, y_batch, name='eval_loss') if metrics: - val_acc += metrics(_logits, y_batch) + metrics.update(_logits, y_batch) + val_acc += metrics.result() + metrics.reset() else: - val_acc += np.mean((P.Equal()(P.Argmax(axis=1)(output), y_batch).asnumpy())) + val_acc += np.mean((P.Equal()(P.Argmax(axis=1)(_logits), y_batch).asnumpy())) n_iter += 1 print(" val loss: {}".format(val_loss /
n_iter)) print(" val acc: {}".format(val_acc / n_iter)) - def pd_train( self, n_epoch, train_dataset, network, loss_fn, train_weights, optimizer, metrics, print_train_batch, print_freq, test_dataset @@ -365,26 +369,27 @@ def pd_train( output = network(X_batch) loss = loss_fn(output, y_batch) loss_ce = loss.numpy() - loss.backward() - optimizer.step() - optimizer.clear_grad() + params_grads = optimizer.gradient(loss, network.trainable_weights) + optimizer.apply_gradients(params_grads) train_loss += loss_ce if metrics: - train_acc += metrics(output, y_batch) + metrics.update(output, y_batch) + train_acc += metrics.result() + metrics.reset() else: - train_acc += pd.metric.accuracy(output, y_batch) + train_acc += pd.metric.accuracy(output, y_batch) n_iter += 1 if print_train_batch: print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) print(" train loss: {}".format(train_loss / n_iter)) - print(" train acc: {}".format(train_acc.numpy() / n_iter)) + print(" train acc: {}".format(train_acc / n_iter)) if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) print(" train loss: {}".format(train_loss / n_iter)) - print(" train acc: {}".format(train_acc.numpy() / n_iter)) + print(" train acc: {}".format(train_acc / n_iter)) if test_dataset: # use training and evaluation sets to evaluate the model every print_freq epoch @@ -395,7 +400,9 @@ def pd_train( _logits = network(X_batch) # is_train=False, disable dropout val_loss += loss_fn(_logits, y_batch, name='eval_loss') if metrics: - val_acc += metrics(_logits, y_batch) + metrics.update(_logits, y_batch) + val_acc += metrics.result() + metrics.reset() else: val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 diff --git a/tensorlayer/optimizers/__init__.py b/tensorlayer/optimizers/__init__.py index ffe9995da..9d654bbfb 100644 --- a/tensorlayer/optimizers/__init__.py +++ b/tensorlayer/optimizers/__init__.py @@ -11,11 +11,11 @@ from .amsgrad import AMSGrad -# ['Adadelta', 'Adagrad', 'Adam', 'Admax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS'] +# ['Adadelta', 'Adagrad', 'Adam', 'Adamax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS'] from .load_optimizers_backend import Adadelta from .load_optimizers_backend import Adagrad from .load_optimizers_backend import Adam -from .load_optimizers_backend import Admax +from .load_optimizers_backend import Adamax from .load_optimizers_backend import Ftrl from .load_optimizers_backend import Nadam from .load_optimizers_backend import RMSprop diff --git a/tensorlayer/optimizers/mindspore_optimizers.py b/tensorlayer/optimizers/mindspore_optimizers.py index 659a74937..dd70e5fe2 100644 --- a/tensorlayer/optimizers/mindspore_optimizers.py +++ b/tensorlayer/optimizers/mindspore_optimizers.py @@ -6,7 +6,7 @@ import mindspore as ms from mindspore.nn import Cell -__all__ = ['Adadelta', 'Adagrad', 'Adam', 'Admax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS'] +__all__ = ['Adadelta', 'Adagrad', 'Adam', 'Adamax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS'] class Adadelta(Cell): @@ -50,13 +50,13 @@ def apply_gradients(self, grads_and_vars): optimizer_adam(grads) -class Admax(Cell): +class Adamax(Cell): def __init__(self): pass def app_gradients(self): - raise Exception('Admax optimizer function not implemented') + raise Exception('Adamax optimizer function not implemented') class Ftrl(Cell): diff --git 
a/tensorlayer/optimizers/paddle_optimizers.py b/tensorlayer/optimizers/paddle_optimizers.py index cbc1c2a85..c963b5a48 100644 --- a/tensorlayer/optimizers/paddle_optimizers.py +++ b/tensorlayer/optimizers/paddle_optimizers.py @@ -1,44 +1,347 @@ #! /usr/bin/python # -*- coding: utf-8 -*- - from __future__ import absolute_import, division, print_function +import paddle +from paddle.optimizer import Optimizer + +__all__ = ['Adadelta', 'Adagrad', 'Adam', 'Adamax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS'] + + +class Adadelta(Optimizer): + + def __init__(self, learning_rate=0.001, epsilon=1.0e-6, rho=0.95): + if learning_rate is None: + raise ValueError('learning_rate is not set.') + if epsilon is None: + raise ValueError('epsilon is not set.') + if rho is None: + raise ValueError('rho is not set.') + self.learning_rate = learning_rate + self.epsilon = epsilon + self.rho = rho + + def gradient(self, loss, weights): + if loss is None: + raise ValueError('loss is not set.') + if weights is None: + raise ValueError('weights is not set.') + + self.adadelta = paddle.optimizer.Adadelta( + learning_rate=self.learning_rate, epsilon=self.epsilon, rho=self.rho, parameters=weights + ) + loss.backward() + weights_and_grads = self.adadelta.backward(loss=loss, parameters=weights) + + return weights_and_grads + + def apply_gradients(self, weights_and_grads): + if weights_and_grads is None: + raise ValueError('weights_and_grads is not set.') + self.adadelta._apply_optimize(loss=None, startup_program=None, params_grads=weights_and_grads) + self.adadelta.clear_grad() + + +class Adagrad(Optimizer): + + def __init__(self, learning_rate, initial_accumulator_value=0.0, epsilon=1.0e-6): + + if learning_rate is None: + raise ValueError('learning_rate is not set.') + if initial_accumulator_value is None: + raise ValueError('initial_accumulator_value is not set.') + if epsilon is None: + raise ValueError('epsilon is not set.') + + self.learning_rate = learning_rate + self.initial_accumulator_value = initial_accumulator_value + self.epsilon = epsilon + + def gradient(self, loss, weights): + if loss is None: + raise ValueError('loss is not set.') + if weights is None: + raise ValueError('weights is not set.') + self.adagrad = paddle.optimizer.Adagrad( + learning_rate=self.learning_rate, epsilon=self.epsilon, + initial_accumulator_value=self.initial_accumulator_value, parameters=weights + ) + loss.backward() + weights_and_grads = self.adagrad.backward(loss=loss, parameters=weights) + + return weights_and_grads + + def apply_gradients(self, weights_and_grads): + if weights_and_grads is None: + raise ValueError('weights_and_grads is not set.') + self.adagrad._apply_optimize(loss=None, startup_program=None, params_grads=weights_and_grads) + self.adagrad.clear_grad() + + +class Adam(Optimizer): + + def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1.0e-8): + + if learning_rate is None: + raise ValueError('learning_rate is not set.') + if beta_1 is None: + raise ValueError('beta_1 is not set.') + if beta_2 is None: + raise ValueError('beta_2 is not set.') + if epsilon is None: + raise ValueError('epsilon is not set.') + + if not 0 <= beta_1 < 1: + raise ValueError("Invalid value of beta1, expect beta1 in [0,1).") + if not 0 <= beta_2 < 1: + raise ValueError("Invalid value of beta2, expect beta2 in [0,1).") + + self.learning_rate = learning_rate + self.beta_1 = beta_1 + self.beta_2 = beta_2 + self.epsilon = epsilon + + def gradient(self, loss, weights): + if loss is None: + raise
ValueError('loss is not set.') + if weights is None: + raise ValueError('weights is not set.') + self.adam = paddle.optimizer.Adam( + learning_rate=self.learning_rate, beta1=self.beta_1, beta2=self.beta_2, epsilon=self.epsilon, + parameters=weights + ) + loss.backward() + weights_and_grads = self.adam.backward(loss, parameters=weights) + + return weights_and_grads + + def apply_gradients(self, weights_and_grads): + if weights_and_grads is None: + raise ValueError('weights_and_grads is not set.') + self.adam._apply_optimize(loss=None, startup_program=None, params_grads=weights_and_grads) + self.adam.clear_grad() + + +class Adamax(Optimizer): + + def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1.0e-8): + + if learning_rate is None: + raise ValueError('learning_rate is not set.') + if beta_1 is None: + raise ValueError('beta_1 is not set.') + if beta_2 is None: + raise ValueError('beta_2 is not set.') + if epsilon is None: + raise ValueError('epsilon is not set.') + + if not 0 <= beta_1 < 1: + raise ValueError("Invalid value of beta1, expect beta1 in [0,1).") + if not 0 <= beta_2 < 1: + raise ValueError("Invalid value of beta2, expect beta2 in [0,1).") + + self.learning_rate = learning_rate + self.beta_1 = beta_1 + self.beta_2 = beta_2 + self.epsilon = epsilon + + def gradient(self, loss, weights): + if loss is None: + raise ValueError('loss is not set.') + if weights is None: + raise ValueError('weights is not set.') + self.adamax = paddle.optimizer.Adamax( + learning_rate=self.learning_rate, beta1=self.beta_1, beta2=self.beta_2, epsilon=self.epsilon, + parameters=weights + ) + loss.backward() + weights_and_grads = self.adamax.backward(loss=loss, parameters=weights) + + return weights_and_grads + + def apply_gradients(self, weights_and_grads): + if weights_and_grads is None: + raise ValueError('weights_and_grads is not set.') + self.adamax._apply_optimize(loss=None, startup_program=None, params_grads=weights_and_grads) + self.adamax.clear_grad() + + +class Ftrl(Optimizer): + + def __init__(self): + + raise Exception('Ftrl optimizer function not implemented') + + +class Nadam(Optimizer): + + def __init__(self): + + raise Exception('Nadam optimizer function not implemented') + + +class RMSprop(Optimizer): + + def __init__(self, learning_rate=0.001, rho=0.95, epsilon=1.0e-6, momentum=0.0, centered=False): + if learning_rate is None: + raise ValueError("learning_rate is not set.") + if rho is None: + raise ValueError("rho is not set.") + if epsilon is None: + raise ValueError("epsilon is not set.") + if momentum is None: + raise ValueError("momentum is not set.") + if not 0.0 <= epsilon: + raise ValueError("Invalid value of epsilon, expect epsilon >= 0.") + if not 0.0 <= momentum: + raise ValueError("Invalid value of momentum, expect momentum >= 0.") + if not 0.0 <= rho: + raise ValueError("Invalid value of rho, expect rho >= 0.") + + self.learning_rate = learning_rate + self.epsilon = epsilon + self.rho = rho + self.momentum = momentum + self.centered = centered + + def gradient(self, loss, weights): + if loss is None: + raise ValueError('loss is not set.') + if weights is None: + raise ValueError('weights is not set.') + + self.rmsprop = paddle.optimizer.RMSProp( + learning_rate=self.learning_rate, epsilon=self.epsilon, rho=self.rho, momentum=self.momentum, + parameters=weights + ) + loss.backward() + weights_and_grads = self.rmsprop.backward(loss=loss, parameters=weights) + + return weights_and_grads + + def apply_gradients(self, weights_and_grads): + if
weights_and_grads is None: + raise ValueError('weights_and_grads is not set.') + self.rmsprop._apply_optimize(loss=None, startup_program=None, params_grads=weights_and_grads) + self.rmsprop.clear_grad() + + +class SGD(Optimizer): + + def __init__(self, learning_rate=0.001): + if learning_rate is None: + raise ValueError("learning_rate is not set.") + + self.learning_rate = learning_rate + + def gradient(self, loss, weights): + if loss is None: + raise ValueError('loss is not set.') + if weights is None: + raise ValueError('weights is not set.') + + self.sgd = paddle.optimizer.SGD(learning_rate=self.learning_rate, parameters=weights) + loss.backward() + weights_and_grads = self.sgd.backward(loss=loss, parameters=weights) + + return weights_and_grads + + def apply_gradients(self, weights_and_grads): + if weights_and_grads is None: + raise ValueError('weights_and_grads is not set.') + self.sgd._apply_optimize(loss=None, startup_program=None, params_grads=weights_and_grads) + self.sgd.clear_grad() + + +class Momentum(Optimizer): + + def __init__(self, learning_rate=0.001, momentum=0.9, nesterov=False): + if learning_rate is None: + raise ValueError("learning_rate is not set.") + if momentum is None: + raise ValueError("momentum is not set.") + + self.learning_rate = learning_rate + self.momentum = momentum + self.nesterov = nesterov + + def gradient(self, loss, weights): + if loss is None: + raise ValueError('loss is not set.') + if weights is None: + raise ValueError('weights is not set.') + + self.moment = paddle.optimizer.Momentum( + learning_rate=self.learning_rate, momentum=self.momentum, parameters=weights, use_nesterov=self.nesterov + ) + loss.backward() + weights_and_grads = self.moment.backward(loss=loss, parameters=weights) + return weights_and_grads + + def apply_gradients(self, weights_and_grads): + if weights_and_grads is None: + raise ValueError('weights_and_grads is not set.') + self.moment._apply_optimize(loss=None, startup_program=None, params_grads=weights_and_grads) + self.moment.clear_grad() + + +class Lamb(Optimizer): + + def __init__(self, learning_rate=0.001, lamb_weight_decay=0.01, beta_1=0.9, beta_2=0.999, epsilon=1.0e-6): -__all__ = ['Adadelta', 'Adagrad', 'Adam', 'Admax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS'] + if learning_rate is None: + raise ValueError('learning_rate is not set.') + if lamb_weight_decay is None: + raise ValueError('lamb_weight_decay is not set.') + if beta_1 is None: + raise ValueError('beta_1 is not set.') + if beta_2 is None: + raise ValueError('beta_2 is not set.') + if epsilon is None: + raise ValueError('epsilon is not set.') -# Add module aliases + if not 0 <= beta_1 < 1: + raise ValueError("Invalid value of beta1, expect beta1 in [0,1).") + if not 0 <= beta_2 < 1: + raise ValueError("Invalid value of beta2, expect beta2 in [0,1).") -# learning_rate=0.001, rho=0.95, epsilon=1e-07, name='Adadelta' -Adadelta = None + self.learning_rate = learning_rate + self.lamb_weight_decay = lamb_weight_decay + self.beta_1 = beta_1 + self.beta_2 = beta_2 + self.epsilon = epsilon -# learning_rate=0.001, initial_accumulator_value=0.1, epsilon=1e-07,name='Adagrad' -Adagrad = None + def gradient(self, loss, weights): + if loss is None: + raise ValueError('loss is not set.') + if weights is None: + raise ValueError('weights is not set.') -# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False,name='Adam' -Adam = None + self.lamb = paddle.optimizer.Lamb( + learning_rate=self.learning_rate,
lamb_weight_decay=self.lamb_weight_decay, beta1=self.beta_1, + beta2=self.beta_2, epsilon=self.epsilon, parameters=weights + ) + loss.backward() + weights_and_grads = self.lamb.backward(loss=loss, parameters=weights) -# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Adamax' -Admax = None + return weights_and_grads -# learning_rate=0.001, learning_rate_power=-0.5, initial_accumulator_value=0.1, -# l1_regularization_strength=0.0, l2_regularization_strength=0.0, name='Ftrl',l2_shrinkage_regularization_strength=0.0 -Ftrl = None + def apply_gradients(self, weights_and_grads): + if weights_and_grads is None: + raise ValueError('weights_and_grads is not set.') + self.lamb._apply_optimize(loss=None, startup_program=None, params_grads=weights_and_grads) + self.lamb.clear_grad() -# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam', -Nadam = None -# learning_rate=0.001, rho=0.9, momentum=0.0, epsilon=1e-07, centered=False,name='RMSprop' -RMSprop = None +class LARS(Optimizer): -# learning_rate=0.01, momentum=0.0, nesterov=False, name='SGD' -SGD = None + def __init__(self): -# learning_rate, momentum, use_locking=False, name='Momentum', use_nesterov=False -Momentum = None + pass + def gradient(self): -def Lamb(**kwargs): - raise Exception('Lamb optimizer function not implemented') + pass + def apply_gradients(self, weights_and_grads): -def LARS(**kwargs): - raise Exception('LARS optimizer function not implemented') + raise Exception('LARS optimizer function not implemented') diff --git a/tensorlayer/optimizers/tensorflow_optimizers.py b/tensorlayer/optimizers/tensorflow_optimizers.py index 0cae4cc8a..971df3826 100644 --- a/tensorlayer/optimizers/tensorflow_optimizers.py +++ b/tensorlayer/optimizers/tensorflow_optimizers.py @@ -4,7 +4,7 @@ from __future__ import absolute_import, division, print_function import tensorflow as tf -__all__ = ['Adadelta', 'Adagrad', 'Adam', 'Admax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS'] +__all__ = ['Adadelta', 'Adagrad', 'Adam', 'Adamax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS'] # Add module aliases @@ -18,7 +18,7 @@ Adam = tf.optimizers.Adam # learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Adamax' -Admax = tf.optimizers.Adamax +Adamax = tf.optimizers.Adamax # learning_rate=0.001, learning_rate_power=-0.5, initial_accumulator_value=0.1, # l1_regularization_strength=0.0, l2_regularization_strength=0.0, name='Ftrl',l2_shrinkage_regularization_strength=0.0 diff --git a/tests/dataflow/__init__.py b/tests/dataflow/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/dataflow/test_dataflow_image.py b/tests/dataflow/test_dataflow_image.py new file mode 100644 index 000000000..dcdf64db4 --- /dev/null +++ b/tests/dataflow/test_dataflow_image.py @@ -0,0 +1,279 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import os +import unittest + +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' + +import tensorlayer as tl + +from tests.utils import CustomTestCase + + +class Dataflow_Image_Test(CustomTestCase): + + @classmethod + def setUpClass(self): + self.input_shape = [1, 100, 100, 3] + self.input_layer = tl.layers.Input(self.input_shape, name='input_layer') + self.input_shape_1 = [100, 100, 3] + self.input_layer_1 = tl.layers.Input(self.input_shape_1, name='input_layer_1') + + self.centralcrop_1 = tl.dataflow.image.CentralCrop(self.input_layer, central_fraction=0.5) + self.centralcrop_2 = tl.dataflow.image.CentralCrop(self.input_layer, size=60) + + 
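# CentralCrop supports two modes: central_fraction keeps the given fraction of each spatial dimension (here 100 * 0.5 -> 50), while size crops to an absolute spatial size (60 x 60); both expectations are asserted in test_centralcrop_1/2 below. +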
self.hsvtorgb = tl.dataflow.image.HsvToRgb(self.input_layer) + + self.adjustbrightness = tl.dataflow.image.AdjustBrightness(self.input_layer, factor=0.5) + self.adjustconstrast = tl.dataflow.image.AdjustContrast(self.input_layer, factor=0.5) + self.adjusthue = tl.dataflow.image.AdjustHue(self.input_layer, factor=0.5) + self.adjustsaturation = tl.dataflow.image.AdjustSaturation(self.input_layer, factor=0.5) + + self.crop = tl.dataflow.image.Crop( + self.input_layer, offset_height=20, offset_width=20, target_height=60, target_width=60 + ) + + self.fliphorizontal = tl.dataflow.image.FlipHorizontal(self.input_layer) + self.flipvertical = tl.dataflow.image.FlipVertical(self.input_layer) + + self.rgbtogray = tl.dataflow.image.RgbToGray(self.input_layer) + self.graytorgb = tl.dataflow.image.GrayToRgb(self.rgbtogray) + + self.padtoboundingbox = tl.dataflow.image.PadToBoundingbox( + self.input_layer, offset_height=20, offset_width=20, target_height=150, target_width=150 + ) + + self.pad_1 = tl.dataflow.image.Pad(self.input_layer, padding=10, padding_value=1, mode='constant') + self.pad_2 = tl.dataflow.image.Pad(self.input_layer, padding=(10, 10), mode='REFLECT') + self.pad_3 = tl.dataflow.image.Pad(self.input_layer, padding=(10, 20, 30, 40), mode='SYMMETRIC') + + self.standardization_1 = tl.dataflow.image.Standardization( + self.input_layer, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5) + ) + self.standardization_2 = tl.dataflow.image.Standardization(self.input_layer, channel_mode=False) + self.standardization_3 = tl.dataflow.image.Standardization(self.input_layer, channel_mode=True) + + self.randombrightness = tl.dataflow.image.RandomBrightness(self.input_layer, factor=0.5) + self.randomcontrast = tl.dataflow.image.RandomContrast(self.input_layer, lower=0.2, upper=0.5) + self.randomhue = tl.dataflow.image.RandomHue(self.input_layer, factor=0.5) + self.randomsaturation = tl.dataflow.image.RandomSaturation(self.input_layer, lower=0.2, upper=0.5) + + self.randomcrop_1 = tl.dataflow.image.RandomCrop(self.input_layer, size=50) + self.randomcrop_2 = tl.dataflow.image.RandomCrop(self.input_layer, size=(50, 60)) + + self.resize_1 = tl.dataflow.image.Resize( + self.input_layer, size=46, method='bilinear', preserve_aspect_ratio=False, antialias=True + ) + + self.resize_2 = tl.dataflow.image.Resize( + self.input_layer, size=(32, 45), method='bilinear', preserve_aspect_ratio=True, antialias=False + ) + + self.croporpad = tl.dataflow.image.CropOrPad(self.input_layer, target_height=50, target_width=150) + self.resizeandpad = tl.dataflow.image.ResizeAndPad( + self.input_layer, target_height=50, target_width=150, method='bilinear' + ) + self.rgbtohsv = tl.dataflow.image.RgbToHsv(self.input_layer) + self.transpose = tl.dataflow.image.Transpose(self.input_layer, order=(3, 2, 1, 0)) + self.randomrotation = tl.dataflow.image.RandomRotation( + self.input_layer_1, degrees=60, fill_mode='nearest', fill_value=1 + ) + self.randomshift_1 = tl.dataflow.image.RandomShift( + self.input_layer_1, shift=0.5, fill_mode='nearest', fill_value=0 + ) + self.randomshift_2 = tl.dataflow.image.RandomShift( + self.input_layer_1, shift=(0.5, 0.4), fill_mode='nearest', fill_value=0 + ) + + self.randomshear = tl.dataflow.image.RandomShear( + self.input_layer_1, degree=30, fill_mode='nearest', fill_value=1 + ) + + self.randomzoom_1 = tl.dataflow.image.RandomZoom( + self.input_layer_1, zoom_range=0.5, fill_mode='nearest', fill_value=1 + ) + self.randomzoom_2 = tl.dataflow.image.RandomZoom( + self.input_layer_1, zoom_range=(0.5, 0.4), 
fill_mode='nearest', fill_value=1 + ) + + self.rescale = tl.dataflow.image.Rescale(self.input_layer, scale=3, offset=4) + self.randomflipvertical = tl.dataflow.image.RandomFlipVertical(self.input_layer) + self.randomfliphorizontal = tl.dataflow.image.RandomFlipHorizontal(self.input_layer) + self.hwc2chw = tl.dataflow.image.HWC2CHW(self.input_layer) + self.chw2hwc = tl.dataflow.image.CHW2HWC(self.hwc2chw) + + @classmethod + def tearDownClass(self): + pass + + def test_centralcrop_1(self): + + self.assertEqual(tl.get_tensor_shape(self.centralcrop_1), [1, 50, 50, 3]) + + def test_centralcrop_2(self): + + self.assertEqual(tl.get_tensor_shape(self.centralcrop_2), [1, 60, 60, 3]) + + def test_hsvtorgb(self): + + self.assertEqual(tl.get_tensor_shape(self.hsvtorgb), [1, 100, 100, 3]) + + def test_adjustbrightness(self): + + self.assertEqual(tl.get_tensor_shape(self.adjustbrightness), [1, 100, 100, 3]) + + def test_adjustconstrast(self): + + self.assertEqual(tl.get_tensor_shape(self.adjustconstrast), [1, 100, 100, 3]) + + def test_adjusthue(self): + + self.assertEqual(tl.get_tensor_shape(self.adjusthue), [1, 100, 100, 3]) + + def test_adjustsaturation(self): + + self.assertEqual(tl.get_tensor_shape(self.adjustsaturation), [1, 100, 100, 3]) + + def test_crop(self): + + self.assertEqual(tl.get_tensor_shape(self.crop), [1, 60, 60, 3]) + + def test_fliphorizontal(self): + + self.assertEqual(tl.get_tensor_shape(self.fliphorizontal), [1, 100, 100, 3]) + + def test_flipvertical(self): + + self.assertEqual(tl.get_tensor_shape(self.flipvertical), [1, 100, 100, 3]) + + def test_rgbtogray(self): + + self.assertEqual(tl.get_tensor_shape(self.rgbtogray), [1, 100, 100, 1]) + + def test_graytorgb(self): + + self.assertEqual(tl.get_tensor_shape(self.graytorgb), [1, 100, 100, 3]) + + def test_padtoboundingbox(self): + + self.assertEqual(tl.get_tensor_shape(self.padtoboundingbox), [1, 150, 150, 3]) + + def test_pad_1(self): + + self.assertEqual(tl.get_tensor_shape(self.pad_1), [1, 120, 120, 3]) + + def test_pad_2(self): + + self.assertEqual(tl.get_tensor_shape(self.pad_2), [1, 120, 120, 3]) + + def test_pad_3(self): + + self.assertEqual(tl.get_tensor_shape(self.pad_3), [1, 130, 170, 3]) + + def test_standardization_1(self): + + self.assertEqual(tl.get_tensor_shape(self.standardization_1), [1, 100, 100, 3]) + + def test_standardization_2(self): + + self.assertEqual(tl.get_tensor_shape(self.standardization_2), [1, 100, 100, 3]) + + def test_standardization_3(self): + + self.assertEqual(tl.get_tensor_shape(self.standardization_3), [1, 100, 100, 3]) + + def test_randomcontrast(self): + + self.assertEqual(tl.get_tensor_shape(self.randomcontrast), [1, 100, 100, 3]) + + def test_randomhue(self): + + self.assertEqual(tl.get_tensor_shape(self.randomhue), [1, 100, 100, 3]) + + def test_randomsaturation(self): + + self.assertEqual(tl.get_tensor_shape(self.randomsaturation), [1, 100, 100, 3]) + + def test_randomcrop_1(self): + + self.assertEqual(tl.get_tensor_shape(self.randomcrop_1), [1, 50, 50, 3]) + + def test_randomcrop_2(self): + + self.assertEqual(tl.get_tensor_shape(self.randomcrop_2), [1, 50, 60, 3]) + + def test_resize_1(self): + + self.assertEqual(tl.get_tensor_shape(self.resize_1), [1, 46, 46, 3]) + + def test_resize_2(self): + + self.assertEqual(tl.get_tensor_shape(self.resize_2), [1, 32, 32, 3]) + + def test_croporpad(self): + + self.assertEqual(tl.get_tensor_shape(self.croporpad), [1, 50, 150, 3]) + + def test_resizeandpad(self): + + self.assertEqual(tl.get_tensor_shape(self.resizeandpad), [1, 50, 150, 3]) + + def 
test_rgbtohsv(self):
+
+        self.assertEqual(tl.get_tensor_shape(self.rgbtohsv), [1, 100, 100, 3])
+
+    def test_transpose(self):
+
+        self.assertEqual(tl.get_tensor_shape(self.transpose), [3, 100, 100, 1])
+
+    def test_randomrotation(self):
+
+        self.assertEqual(tl.get_tensor_shape(self.randomrotation), [100, 100, 3])
+
+    def test_randomshift_1(self):
+
+        self.assertEqual(tl.get_tensor_shape(self.randomshift_1), [100, 100, 3])
+
+    def test_randomshift_2(self):
+
+        self.assertEqual(tl.get_tensor_shape(self.randomshift_2), [100, 100, 3])
+
+    def test_randomshear(self):
+
+        self.assertEqual(tl.get_tensor_shape(self.randomshear), [100, 100, 3])
+
+    def test_randomzoom_1(self):
+
+        self.assertEqual(tl.get_tensor_shape(self.randomzoom_1), [100, 100, 3])
+
+    def test_randomzoom_2(self):
+
+        self.assertEqual(tl.get_tensor_shape(self.randomzoom_2), [100, 100, 3])
+
+    def test_rescale(self):
+
+        self.assertEqual(tl.get_tensor_shape(self.rescale), [1, 100, 100, 3])
+
+    def test_randomflipvertical(self):
+
+        self.assertEqual(tl.get_tensor_shape(self.randomflipvertical), [1, 100, 100, 3])
+
+    def test_randomfliphorizontal(self):
+
+        self.assertEqual(tl.get_tensor_shape(self.randomfliphorizontal), [1, 100, 100, 3])
+
+    def test_hwc2chw(self):
+
+        self.assertEqual(tl.get_tensor_shape(self.hwc2chw), [1, 3, 100, 100])
+
+    def test_chw2hwc(self):
+
+        self.assertEqual(tl.get_tensor_shape(self.chw2hwc), [1, 100, 100, 3])
+
+
+if __name__ == '__main__':
+
+    tl.logging.set_verbosity(tl.logging.DEBUG)
+
+    unittest.main()
diff --git a/tests/layers/test_layers_convolution.py b/tests/layers/test_layers_convolution.py
index 20fb15afc..df2f69c36 100644
--- a/tests/layers/test_layers_convolution.py
+++ b/tests/layers/test_layers_convolution.py
@@ -29,6 +29,19 @@ def setUpClass(self):
         self.dconv1dlayer1 = tl.layers.DeConv1d(n_filter=64, in_channels=32, filter_size=5, name='deconv1dlayer')
         self.n3 = self.dconv1dlayer1(self.n2)
 
+        self.separableconv1d1 = tl.layers.SeparableConv1d(in_channels=1, n_filter=16, filter_size=3, stride=2)
+        self.n4 = self.separableconv1d1(self.input_layer)
+
+        self.separableconv1d2 = tl.layers.SeparableConv1d(
+            in_channels=1, n_filter=16, filter_size=3, stride=2, depth_multiplier=4
+        )
+        self.n5 = self.separableconv1d2(self.input_layer)
+
+        self.separableconv1d3 = tl.layers.SeparableConv1d(
+            in_channels=1, n_filter=16, filter_size=3, stride=2, depth_multiplier=4, b_init=None
+        )
+        self.n6 = self.separableconv1d3(self.input_layer)
+
     @classmethod
     def tearDownClass(self):
         pass
@@ -45,6 +58,18 @@ def test_layer_n3(self):
         self.assertEqual(len(self.dconv1dlayer1.all_weights), 2)
         self.assertEqual(tl.get_tensor_shape(self.n3), [self.batch_size, 25, 64])
 
+    def test_layer_n4(self):
+        self.assertEqual(len(self.separableconv1d1.all_weights), 3)
+        self.assertEqual(tl.get_tensor_shape(self.n4), [self.batch_size, 50, 16])
+
+    def test_layer_n5(self):
+        self.assertEqual(len(self.separableconv1d2.all_weights), 3)
+        self.assertEqual(tl.get_tensor_shape(self.n5), [self.batch_size, 50, 16])
+
+    def test_layer_n6(self):
+        self.assertEqual(len(self.separableconv1d3.all_weights), 2)
+        self.assertEqual(tl.get_tensor_shape(self.n6), [self.batch_size, 50, 16])
+
 
 class Layer_Convolution_2D_Test(CustomTestCase):
 
@@ -55,29 +80,58 @@ def setUpClass(self):
         self.inputs_shape = [self.batch_size, 400, 400, 3]
         self.input_layer = tl.layers.Input(self.inputs_shape, name='input_layer')
 
-        self.conv2dlayer1 = tl.layers.Conv2d(n_filter=32, in_channels=3, strides=(2, 2), filter_size=(5, 5),
-                                             padding='SAME',
b_init=tl.initializers.truncated_normal(0.01), name='conv2dlayer' + self.conv2dlayer1 = tl.layers.Conv2d( + n_filter=32, in_channels=3, strides=(2, 2), filter_size=(5, 5), padding='SAME', + b_init=tl.initializers.truncated_normal(0.01), name='conv2dlayer' ) self.n1 = self.conv2dlayer1(self.input_layer) - self.conv2dlayer2 = tl.layers.Conv2d(n_filter=32, in_channels=32, filter_size=(3, 3), - strides=(2, 2), act=None, name='conv2d') + self.conv2dlayer2 = tl.layers.Conv2d( + n_filter=32, in_channels=32, filter_size=(3, 3), strides=(2, 2), act=None, name='conv2d' + ) self.n2 = self.conv2dlayer2(self.n1) - self.conv2dlayer3 = tl.layers.Conv2d(in_channels=32, n_filter=32, filter_size=(3, 3), strides=(2, 2), - act=tl.ReLU, b_init=None, name='conv2d_no_bias' + self.conv2dlayer3 = tl.layers.Conv2d( + in_channels=32, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tl.ReLU, b_init=None, + name='conv2d_no_bias' ) self.n3 = self.conv2dlayer3(self.n2) - self.dconv2dlayer = tl.layers.DeConv2d(n_filter=32, in_channels=32, filter_size=(5, 5), strides=(2, 2), - name='deconv2dlayer' + self.dconv2dlayer = tl.layers.DeConv2d( + n_filter=32, in_channels=32, filter_size=(5, 5), strides=(2, 2), name='deconv2dlayer' ) self.n4 = self.dconv2dlayer(self.n3) - self.dwconv2dlayer = tl.layers.DepthwiseConv2d(in_channels=32, filter_size=(3, 3), strides=(1, 1), - dilation_rate=(2, 2), act=tl.ReLU, depth_multiplier=2,name='depthwise') + self.dwconv2dlayer = tl.layers.DepthwiseConv2d( + in_channels=32, filter_size=(3, 3), strides=(1, 1), dilation_rate=(2, 2), act=tl.ReLU, depth_multiplier=2, + name='depthwise' + ) self.n5 = self.dwconv2dlayer(self.n4) + self.separableconv2d = tl.layers.SeparableConv2d( + in_channels=3, filter_size=(3, 3), strides=(2, 2), dilation_rate=(2, 2), act=tl.ReLU, depth_multiplier=3, + name='separableconv2d' + ) + self.n6 = self.separableconv2d(self.input_layer) + + self.groupconv2d = tl.layers.GroupConv2d( + in_channels=3, n_filter=18, filter_size=(3, 3), strides=(2, 2), dilation_rate=(3, 3), n_group=3, + act=tl.ReLU, name='groupconv2d' + ) + self.n7 = self.groupconv2d(self.input_layer) + + self.binaryconv2d = tl.layers.BinaryConv2d( + in_channels=3, n_filter=32, filter_size=(3, 3), strides=(2, 2), dilation_rate=(2, 2), act=tl.ReLU, + name='binaryconv2d' + ) + self.n8 = self.binaryconv2d(self.input_layer) + + self.dorefaconv2d = tl.layers.DorefaConv2d( + bitA=2, bitW=8, in_channels=3, n_filter=16, filter_size=(3, 3), strides=(2, 2), dilation_rate=(2, 2), + act=tl.ReLU, name='dorefaconv2d' + ) + self.n9 = self.dorefaconv2d(self.input_layer) + @classmethod def tearDownClass(cls): pass @@ -103,6 +157,22 @@ def test_layer_n5(self): self.assertEqual(len(self.dwconv2dlayer.all_weights), 2) self.assertEqual(tl.get_tensor_shape(self.n5), [self.batch_size, 100, 100, 64]) + def test_layer_n6(self): + self.assertEqual(len(self.separableconv2d.all_weights), 3) + self.assertEqual(tl.get_tensor_shape(self.n6), [self.batch_size, 198, 198, 32]) + + def test_layer_n7(self): + self.assertEqual(len(self.groupconv2d.all_weights), 2) + self.assertEqual(tl.get_tensor_shape(self.n7), [self.batch_size, 200, 200, 18]) + + def test_layer_n8(self): + self.assertEqual(len(self.binaryconv2d.all_weights), 2) + self.assertEqual(tl.get_tensor_shape(self.n8), [self.batch_size, 198, 198, 32]) + + def test_layer_n9(self): + self.assertEqual(len(self.dorefaconv2d.all_weights), 2) + self.assertEqual(tl.get_tensor_shape(self.n9), [self.batch_size, 200, 200, 16]) + class Layer_Convolution_3D_Test(CustomTestCase): @@ -117,12 +187,13 
@@ def setUpClass(self): self.conv3dlayer1 = tl.layers.Conv3d(n_filter=32, in_channels=3, filter_size=(2, 2, 2), strides=(2, 2, 2)) self.n1 = self.conv3dlayer1(self.input_layer) - self.deconv3dlayer = tl.layers.DeConv3d(n_filter=128, in_channels=32, filter_size=(2, 2, 2), strides=(2, 2, 2) - ) + self.deconv3dlayer = tl.layers.DeConv3d(n_filter=128, in_channels=32, filter_size=(2, 2, 2), strides=(2, 2, 2)) self.n2 = self.deconv3dlayer(self.n1) - self.conv3dlayer2 = tl.layers.Conv3d(n_filter=64, in_channels=128,filter_size=(3, 3, 3), strides=(3, 3, 3), - act=tl.ReLU, b_init=None, name='conv3d_no_bias') + self.conv3dlayer2 = tl.layers.Conv3d( + n_filter=64, in_channels=128, filter_size=(3, 3, 3), strides=(3, 3, 3), act=tl.ReLU, b_init=None, + name='conv3d_no_bias' + ) self.n3 = self.conv3dlayer2(self.n2) @classmethod diff --git a/tests/layers/test_layers_pooling.py b/tests/layers/test_layers_pooling.py index 39582aa28..65643fca9 100644 --- a/tests/layers/test_layers_pooling.py +++ b/tests/layers/test_layers_pooling.py @@ -30,6 +30,8 @@ def setUpClass(cls): n16 = tl.layers.MaxPool1d(filter_size=3, strides=1, padding='VALID', dilation_rate=2, name='test_maxpool1d')(n1) n17 = tl.layers.MeanPool1d(filter_size=3, strides=1, padding='VALID', dilation_rate=2, name='test_meanpool1d')(n1) + n19 = tl.layers.AdaptiveMeanPool1d(output_size=44, name='test_adaptivemeanpool1d')(n1) + n20 = tl.layers.AdaptiveMaxPool1d(output_size=44, name='test_adaptivemaxpool1d')(n1) cls.n1_shape = n1.get_shape().as_list() cls.n2_shape = n2.get_shape().as_list() @@ -38,6 +40,8 @@ def setUpClass(cls): cls.n5_shape = n5.get_shape().as_list() cls.n16_shape = n16.get_shape().as_list() cls.n17_shape = n17.get_shape().as_list() + cls.n19_shape = n19.get_shape().as_list() + cls.n20_shape = n20.get_shape().as_list() ## 2D ======================================================================== @@ -51,6 +55,8 @@ def setUpClass(cls): n10 = tl.layers.GlobalMeanPool2d(name='test_meanpool2d')(n6) n15 = tl.layers.PoolLayer(name='test_pool2d')(n6) # n18 = tl.layers.CornerPool2d('TopLeft', name='test_cornerpool2d')(n6) + n21 = tl.layers.AdaptiveMeanPool2d(output_size=(45, 32), name='test_adaptivemeanpool2d')(n6) + n22 = tl.layers.AdaptiveMaxPool2d(output_size=(45, 32), name='test_adaptivemaxpool2d')(n6) cls.n6_shape = n6.get_shape().as_list() cls.n7_shape = n7.get_shape().as_list() @@ -59,7 +65,8 @@ def setUpClass(cls): cls.n10_shape = n10.get_shape().as_list() cls.n15_shape = n15.get_shape().as_list() # cls.n18_shape = n18.get_shape().as_list() - + cls.n21_shape = n21.get_shape().as_list() + cls.n22_shape = n22.get_shape().as_list() ## 3D ======================================================================== @@ -73,10 +80,17 @@ def setUpClass(cls): n14 = tl.layers.MaxPool3d(filter_size=(3, 3, 3), strides=(2, 2, 2), padding='SAME', name='test_maxpool3d')(nin_3) + n23 = tl.layers.AdaptiveMeanPool3d(output_size=(45, 32, 55), name='test_adaptivemeanpool3d')(nin_3) + n24 = tl.layers.AdaptiveMaxPool3d(output_size=(45, 32, 55), name='test_adaptivemaxpool3d')(nin_3) + cls.n11_shape = n11.get_shape().as_list() cls.n12_shape = n12.get_shape().as_list() cls.n13_shape = n13.get_shape().as_list() cls.n14_shape = n14.get_shape().as_list() + cls.n21_shape = n21.get_shape().as_list() + cls.n22_shape = n22.get_shape().as_list() + cls.n23_shape = n23.get_shape().as_list() + cls.n24_shape = n24.get_shape().as_list() @classmethod def tearDownClass(cls): @@ -134,6 +148,24 @@ def test_n16_shape(self): def test_n17_shape(self): 
self.assertEqual(self.n17_shape[1:4], [46, 32])
 
+    def test_n19_shape(self):
+        self.assertEqual(self.n19_shape[1:3], [44, 32])
+
+    def test_n20_shape(self):
+        self.assertEqual(self.n20_shape[1:3], [44, 32])
+
+    def test_n21_shape(self):
+        self.assertEqual(self.n21_shape[1:4], [45, 32, 32])
+
+    def test_n22_shape(self):
+        self.assertEqual(self.n22_shape[1:4], [45, 32, 32])
+
+    def test_n23_shape(self):
+        self.assertEqual(self.n23_shape[1:5], [45, 32, 55, 3])
+
+    def test_n24_shape(self):
+        self.assertEqual(self.n24_shape[1:5], [45, 32, 55, 3])
+
     # def test_n18_shape(self):
     #     self.assertEqual(self.n18_shape[1:], [50, 50, 32])

From a4814623f7984295e2edcf6e24df0acd9501dc7a Mon Sep 17 00:00:00 2001
From: Eric Lai 
Date: Wed, 12 May 2021 17:53:46 +0800
Subject: [PATCH 10/36] update cost

---
 docs/index.rst | 2 +-
 docs/modules/app.rst | 10 +++++++
 docs/modules/visualize.rst | 4 +++
 .../tutorial_paddle_tensorlayer_mlp.py | 2 ++
 requirements/requirements_test.txt | 4 +--
 tensorlayer/cost/paddle_cost.py | 26 ++++++++++++++++---
 tensorlayer/dataflow/tensorflow_data.py | 2 +-
 tensorlayer/models/core.py | 2 +-
 8 files changed, 42 insertions(+), 10 deletions(-)
 create mode 100644 docs/modules/app.rst

diff --git a/docs/index.rst b/docs/index.rst
index c08623a76..b4b1fd2b6 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -9,7 +9,7 @@ Welcome to TensorLayer
 
 **Documentation Version:** |release|
 
-**Jun 2020** `Deep Reinforcement Learning Book Is Coming `__.
+**Jun 2020** `Deep Reinforcement Learning Book Is Released `__.
 
 **Good News:** We won the **Best Open Source Software Award** `@ACM Multimedia (MM) 2017 `_.
 
diff --git a/docs/modules/app.rst b/docs/modules/app.rst
new file mode 100644
index 000000000..d636292e8
--- /dev/null
+++ b/docs/modules/app.rst
@@ -0,0 +1,10 @@
+API - Application Library
+=========================
+
+The application library is a set of open-source deep learning applications built on TensorLayer.
+
+Supported Applications:
+-------------------------
+
+
+
diff --git a/docs/modules/visualize.rst b/docs/modules/visualize.rst
index 0bbe02861..0ef8f3b12 100644
--- a/docs/modules/visualize.rst
+++ b/docs/modules/visualize.rst
@@ -19,6 +19,7 @@ to visualize the model, activations etc. Here we provide more functions for data
    frame
    images2d
    tsne_embedding
+   draw_boxes_and_labels_to_image_with_json
 
 
 Save and read images
 --------------------
@@ -44,6 +45,9 @@ Save image for object detection
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 .. autofunction:: draw_boxes_and_labels_to_image
 
+Save image for object detection with json
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: draw_boxes_and_labels_to_image_with_json
 
 Save image for pose estimation (MPII)
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py b/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py
index 274f6be11..ce02d34bd 100644
--- a/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py
+++ b/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py
@@ -1,7 +1,9 @@
 #!
/usr/bin/python # -*- coding: utf-8 -*- + import os os.environ['TL_BACKEND'] = 'paddle' +# os.environ['TL_BACKEND'] = 'tensorflow' import tensorlayer as tl from tensorlayer.layers import Module diff --git a/requirements/requirements_test.txt b/requirements/requirements_test.txt index 9642a41a4..e47c0ed72 100644 --- a/requirements/requirements_test.txt +++ b/requirements/requirements_test.txt @@ -6,6 +6,4 @@ pytest-cache>=1.0,<1.1 pytest-cov>=2.7.1 pytest-xdist>=1.28.0 sphinx==2.0.1 -yapf==0.29.0 -autoflake==1.3.1 -isort==4.3.21 +yapf>=0.27.0 diff --git a/tensorlayer/cost/paddle_cost.py b/tensorlayer/cost/paddle_cost.py index dc0615bf1..cd66fa705 100644 --- a/tensorlayer/cost/paddle_cost.py +++ b/tensorlayer/cost/paddle_cost.py @@ -232,6 +232,7 @@ def dice_coe(output, target, loss_type='jaccard', axis=(1, 2, 3), smooth=1e-5): """ + axis = list(axis) inse = pd.fluid.layers.reduce_sum(output * target, dim=axis) if loss_type == 'jaccard': l = pd.fluid.layers.reduce_sum(output * output, dim=axis) @@ -271,7 +272,16 @@ def dice_hard_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5): """ - raise NotImplementedError("Not Implemented.") + output = pd.cast(output > threshold, dtype='float32') + target = pd.cast(target > threshold, dtype='float32') + inse = pd.fluid.layers.reduce_sum(pd.multiply(output, target), dim=list(axis)) + l = pd.fluid.layers.reduce_sum(output, dim=list(axis)) + r = pd.fluid.layers.reduce_sum(target, dim=list(axis)) + + hard_dice = (2. * inse + smooth) / (l + r + smooth) + ## + hard_dice = pd.fluid.layers.reduce_mean(hard_dice) + return hard_dice def iou_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5): @@ -298,7 +308,13 @@ def iou_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5): """ - raise NotImplementedError("Not Implemented.") + pre = pd.cast(output > threshold, dtype='float32') + truth = pd.cast(target > threshold, dtype='float32') + inse = pd.fluid.layers.reduce_sum(pd.multiply(pre, truth), dim=axis) # AND + union = pd.fluid.layers.reduce_sum(pd.cast(pd.add(pre, truth) >= 1, dtype='float32'), dim=axis) # OR + batch_iou = (inse + smooth) / (union + smooth) + iou = pd.fluid.layers.reduce_mean(batch_iou, name='iou_coe') + return iou def sequence_loss_by_example( @@ -426,7 +442,9 @@ def cosine_similarity(v1, v2): """ - raise NotImplementedError("Not Implemented.") + return pd.fluid.layers.reduce_sum(pd.multiply(v1, v2), 1) / \ + (pd.sqrt(pd.fluid.layers.reduce_sum(pd.multiply(v1, v1), 1)) * + pd.sqrt(pd.fluid.layers.reduce_sum(pd.multiply(v2, v2), 1))) # Regularization Functions @@ -582,4 +600,4 @@ def huber_loss( """ - raise NotImplementedError("Not Implemented.") + raise NotImplementedError("Not Implemented.") \ No newline at end of file diff --git a/tensorlayer/dataflow/tensorflow_data.py b/tensorlayer/dataflow/tensorflow_data.py index 8dca50f80..4da229a43 100644 --- a/tensorlayer/dataflow/tensorflow_data.py +++ b/tensorlayer/dataflow/tensorflow_data.py @@ -255,7 +255,7 @@ def Zip(datasets): return tf.data.Dataset.zip(datasets) -def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, prefetch=0, shuffle_buffer_size=0): +def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, prefetch=0, shuffle_buffer_size=1024): if shuffle: dataset = Shuffle(dataset, buffer_size=shuffle_buffer_size, reshuffle_each_iteration=True) diff --git a/tensorlayer/models/core.py b/tensorlayer/models/core.py index 35e7d61d5..e449af0be 100644 --- a/tensorlayer/models/core.py +++ b/tensorlayer/models/core.py @@ -369,7 +369,7 @@ def 
pd_train( output = network(X_batch) loss = loss_fn(output, y_batch) loss_ce = loss.numpy() - params_grads = optimizer.gradient(loss, network.trainable_weights) + params_grads = optimizer.gradient(loss, train_weights) optimizer.apply_gradients(params_grads) train_loss += loss_ce From 3be575d4010b518c62a4b1f0fc4664e6f149c0da Mon Sep 17 00:00:00 2001 From: Eric Lai Date: Thu, 13 May 2021 11:47:00 +0800 Subject: [PATCH 11/36] fix core --- tensorlayer/layers/core/core_paddle.py | 36 +++++++++++++++++++++++--- tensorlayer/package_info.py | 4 +-- 2 files changed, 34 insertions(+), 6 deletions(-) diff --git a/tensorlayer/layers/core/core_paddle.py b/tensorlayer/layers/core/core_paddle.py index 19b56ee60..769053f25 100644 --- a/tensorlayer/layers/core/core_paddle.py +++ b/tensorlayer/layers/core/core_paddle.py @@ -2,14 +2,14 @@ # -*- coding: utf-8 -*- import copy, six -import tensorlayer as tl from .common import str2act from paddle.fluid import framework from paddle.fluid.dygraph import Layer from paddle.fluid.framework import in_dygraph_mode +from paddle.fluid.dygraph.base import program_desc_tracing_guard, param_guard +from paddle.fluid.dygraph import parallel_helper - -_global_layer_name_dict = {} # TODO: better implementation? +_global_layer_name_dict = {} class Module(Layer): @@ -54,7 +54,10 @@ def __init__(self, name=None, act=None, *args, **kwargs): self.act = act # Layer building state - # self._built = False + self._built = False + + # paddl_built + self._paddle_built = False # Layer nodes state self._nodes = [] @@ -160,6 +163,31 @@ def build(self, inputs_shape): def forward(self, *inputs, **kwargs): raise Exception("The forward method must be implemented by inherited class") + def __call__(self, *inputs, **kwargs): + with param_guard(self._parameters), param_guard(self._buffers): + for forward_pre_hook in self._forward_pre_hooks.values(): + hook_result = forward_pre_hook(self, inputs) + if hook_result is not None: + if not isinstance(hook_result, tuple): + hook_result = (hook_result, ) + inputs = hook_result + + if not self._paddle_built: + with program_desc_tracing_guard(False): + self._build_once(*inputs, **kwargs) + if parallel_helper._is_data_parallel_mode(): + parallel_helper._broadcast_parameters( + self._parameters.values()) + self._paddle_built = True + + outputs = self.forward(*inputs, **kwargs) + + for forward_post_hook in self._forward_post_hooks.values(): + hook_result = forward_post_hook(self, inputs, outputs) + if hook_result is not None: + outputs = hook_result + + return outputs def _get_weights(self, var_name, shape, init=None, trainable=True): if var_name in ["filters", "weights"]: diff --git a/tensorlayer/package_info.py b/tensorlayer/package_info.py index de5a88430..1efbae64a 100644 --- a/tensorlayer/package_info.py +++ b/tensorlayer/package_info.py @@ -2,8 +2,8 @@ # -*- coding: utf-8 -*- """Deep learning and Reinforcement learning library for Researchers and Engineers.""" -MAJOR = 2 -MINOR = 2 +MAJOR = 3 +MINOR = 0 PATCH = 0 PRE_RELEASE = '' # Use the following formatting: (major, minor, patch, prerelease) From 291bca84c1e78a212480b3d1a4fa0d9d05c67567 Mon Sep 17 00:00:00 2001 From: Eric Lai Date: Wed, 16 Jun 2021 09:35:59 +0800 Subject: [PATCH 12/36] update --- ...yerList.py => tutorial_SequentialLayer.py} | 4 +- ...torial_automatic_inference_input _shape.py | 93 ++ ...tutorial_cifar10_cnn_mindspore_backend.py} | 40 +- ...utorial_cifar10_cnn_tensorflow_backend.py} | 5 +- .../tutorial_mnist_mlp_dynamci_dragon.py | 100 -- ...> tutorial_mnist_mlp_mindspore_backend.py} | 
0 ... tutorial_mnist_mlp_tensorflow_backend.py} | 4 +- .../basic_tutorials/tutorial_mnist_simple.py | 3 +- .../tutorial_nested_usage_of_Layer.py | 4 +- .../tutorial_paddle_tensorlayer_mlp.py | 5 +- tensorlayer/backend/ops/__init__.py | 8 +- tensorlayer/backend/ops/dragon_backend.py | 1049 ----------------- tensorlayer/backend/ops/dragon_nn.py | 910 -------------- tensorlayer/backend/ops/load_backend.py | 12 +- tensorlayer/backend/ops/mindspore_backend.py | 82 +- tensorlayer/backend/ops/mindspore_nn.py | 468 ++++---- tensorlayer/backend/ops/paddle_backend.py | 11 +- tensorlayer/backend/ops/paddle_nn.py | 21 + tensorlayer/backend/ops/tensorflow_backend.py | 13 +- tensorlayer/backend/ops/tensorflow_nn.py | 32 + tensorlayer/cost/__init__.py | 2 - tensorlayer/files/utils.py | 23 + .../initializers/load_initializers_backend.py | 4 +- .../initializers/mindspore_initializers.py | 256 ++++ tensorlayer/layers/convolution/binary_conv.py | 3 +- .../layers/convolution/deformable_conv.py | 65 +- .../layers/convolution/depthwise_conv.py | 2 +- tensorlayer/layers/convolution/dorefa_conv.py | 2 +- tensorlayer/layers/convolution/quan_conv.py | 2 +- .../layers/convolution/separable_conv.py | 18 +- .../layers/convolution/simplified_conv.py | 21 +- .../layers/convolution/ternary_conv.py | 2 +- tensorlayer/layers/core/common.py | 110 +- tensorlayer/layers/core/core_dragon.py | 765 ------------ tensorlayer/layers/core/core_mindspore.py | 128 +- tensorlayer/layers/core/core_paddle.py | 12 +- tensorlayer/layers/core/core_tensorflow.py | 387 +++--- tensorlayer/layers/deprecated.py | 9 +- tensorlayer/layers/embedding.py | 7 +- tensorlayer/layers/normalization.py | 1 + tensorlayer/layers/pooling.py | 31 +- tensorlayer/metric/__init__.py | 2 - tensorlayer/metric/mindspore_metric.py | 1 - tensorlayer/models/core.py | 103 +- tensorlayer/optimizers/dragon_optimizers.py | 56 - .../optimizers/load_optimizers_backend.py | 2 - tests/layers/test_layers_pooling.py | 35 +- 47 files changed, 1166 insertions(+), 3747 deletions(-) rename examples/basic_tutorials/{tutorial_LayerList.py => tutorial_SequentialLayer.py} (94%) create mode 100644 examples/basic_tutorials/tutorial_automatic_inference_input _shape.py rename examples/basic_tutorials/{tutorial_cifar10_cnn_dynamic_MS_backend.py => tutorial_cifar10_cnn_mindspore_backend.py} (75%) rename examples/basic_tutorials/{tutorial_cifar10_cnn_dynamic_TF_backend.py => tutorial_cifar10_cnn_tensorflow_backend.py} (98%) delete mode 100644 examples/basic_tutorials/tutorial_mnist_mlp_dynamci_dragon.py rename examples/basic_tutorials/{tutorial_mnist_mlp_dynamic_MS_backend.py => tutorial_mnist_mlp_mindspore_backend.py} (100%) rename examples/basic_tutorials/{tutorial_mnist_mlp_dynamic_TF_backend.py => tutorial_mnist_mlp_tensorflow_backend.py} (98%) delete mode 100644 tensorlayer/backend/ops/dragon_backend.py delete mode 100644 tensorlayer/backend/ops/dragon_nn.py create mode 100644 tensorlayer/initializers/mindspore_initializers.py delete mode 100644 tensorlayer/layers/core/core_dragon.py delete mode 100644 tensorlayer/optimizers/dragon_optimizers.py diff --git a/examples/basic_tutorials/tutorial_LayerList.py b/examples/basic_tutorials/tutorial_SequentialLayer.py similarity index 94% rename from examples/basic_tutorials/tutorial_LayerList.py rename to examples/basic_tutorials/tutorial_SequentialLayer.py index 2b60fecf8..dd5e97249 100644 --- a/examples/basic_tutorials/tutorial_LayerList.py +++ b/examples/basic_tutorials/tutorial_SequentialLayer.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- 
coding: utf-8 -*-
 
-from tensorlayer.layers import LayerList
+from tensorlayer.layers import SequentialLayer
 from tensorlayer.layers import Dense
 import tensorlayer as tl
 import numpy as np
@@ -10,7 +10,7 @@
 layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=784, name='Dense1'))
 layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=800, name='Dense2'))
 layer_list.append(Dense(n_units=10, act=tl.ReLU, in_channels=800, name='Dense3'))
-MLP = LayerList(layer_list)
+MLP = SequentialLayer(layer_list)
 
 X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
 
diff --git a/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py b/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py
new file mode 100644
index 000000000..771fb18ee
--- /dev/null
+++ b/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import numpy as np
+import time
+import tensorflow as tf
+import tensorlayer as tl
+from tensorlayer.layers import Module
+from tensorlayer.layers import Dense, Dropout, BatchNorm1d
+
+X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
+
+
+class CustomModel(Module):
+
+    def __init__(self):
+        super(CustomModel, self).__init__()
+        self.dropout1 = Dropout(keep=0.8)
+        self.dense1 = Dense(n_units=800)
+        self.batchnorm = BatchNorm1d(act=tl.ReLU)
+        self.dropout2 = Dropout(keep=0.8)
+        self.dense2 = Dense(n_units=800, act=tl.ReLU)
+        self.dropout3 = Dropout(keep=0.8)
+        self.dense3 = Dense(n_units=10, act=tl.ReLU)
+
+    def forward(self, x, foo=None):
+        z = self.dropout1(x)
+        z = self.dense1(z)
+        z = self.batchnorm(z)
+        z = self.dropout2(z)
+        z = self.dense2(z)
+        z = self.dropout3(z)
+        out = self.dense3(z)
+        if foo is not None:
+            out = tl.ops.relu(out)
+        return out
+
+
+MLP = CustomModel()
+# Automatic inference of the input shape.
+# If a Layer has no in_channels, init_build(input) must be called to initialize the weights.
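+# For example, dense1 above is created as Dense(n_units=800) with no in_channels, so its
+# weight shape (roughly (784, 800) given the flattened MNIST input) can only be fixed once
+# init_build sees the dummy (1, 784) Input below; after that call, MLP.trainable_weights
+# is fully populated and can be handed to the optimizer.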
+MLP.init_build(tl.layers.Input(shape=(1, 784)))
+
+n_epoch = 50
+batch_size = 500
+print_freq = 5
+train_weights = MLP.trainable_weights
+optimizer = tl.optimizers.Adam(lr=0.0001)
+
+for epoch in range(n_epoch):  ## iterate the dataset n_epoch times
+    start_time = time.time()
+    ## iterate over the entire training set once (shuffle the data while training)
+    for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
+        MLP.set_train()  # enable dropout
+        with tf.GradientTape() as tape:
+            ## compute outputs
+            _logits = MLP(X_batch)
+            ## compute loss and update model
+            _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss')
+        grad = tape.gradient(_loss, train_weights)
+        optimizer.apply_gradients(zip(grad, train_weights))
+
+    ## use training and evaluation sets to evaluate the model every print_freq epoch
+    if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
+        print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
+        train_loss, train_acc, n_iter = 0, 0, 0
+        for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False):
+            _logits = MLP(X_batch)
+            train_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
+            train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
+            n_iter += 1
+        print("   train loss: {}".format(train_loss / n_iter))
+        print("   train acc: {}".format(train_acc / n_iter))
+
+        val_loss, val_acc, n_iter = 0, 0, 0
+        for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False):
+            _logits = MLP(X_batch)  # is_train=False, disable dropout
+            val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
+            val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
+            n_iter += 1
+        print("   val loss: {}".format(val_loss / n_iter))
+        print("   val acc: {}".format(val_acc / n_iter))
+
+## use testing data to evaluate the model
+MLP.set_eval()
+test_loss, test_acc, n_iter = 0, 0, 0
+for X_batch, y_batch in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False):
+    _logits = MLP(X_batch, foo=1)
+    test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss')
+    test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
+    n_iter += 1
+print("   test foo=1 loss: {}".format(test_loss / n_iter))
+print("   test foo=1 acc: {}".format(test_acc / n_iter))
diff --git a/examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_MS_backend.py b/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py
similarity index 75%
rename from examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_MS_backend.py
rename to examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py
index 02ab3e847..e765f3aa5 100644
--- a/examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_MS_backend.py
+++ b/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py
@@ -1,6 +1,8 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-
+import os
+# os.environ['TL_BACKEND'] = 'tensorflow'
+os.environ['TL_BACKEND'] = 'mindspore'
 import time
 import numpy as np
 import multiprocessing
@@ -23,14 +25,14 @@ class CNN(Module):
 
     def __init__(self):
         super(CNN, self).__init__()
-        self.conv1 = Conv2d(64, (5, 5), (2, 2), padding='SAME', b_init=None, name='conv1', in_channels=3, act=tl.ReLU, data_format='channels_first')
-        self.bn = BatchNorm2d(num_features=64, act=tl.ReLU, data_format='channels_first')
-        self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1', data_format='channels_first')
-        self.conv2 = Conv2d(128, (5, 5), (2, 2), padding='SAME', act=tl.ReLU,
b_init=None, name='conv2', in_channels=64, data_format='channels_first') - self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2', data_format='channels_first') + self.conv1 = Conv2d(64, (5, 5), (2, 2), b_init=None, name='conv1', in_channels=3, act=tl.ReLU) + self.bn = BatchNorm2d(num_features=64, act=tl.ReLU) + self.maxpool1 = MaxPool2d((3, 3), (2, 2), name='pool1') + self.conv2 = Conv2d(128, (5, 5), (2, 2), act=tl.ReLU, b_init=None, name='conv2', in_channels=64) + self.maxpool2 = MaxPool2d((3, 3), (2, 2), name='pool2') self.flatten = Flatten(name='flatten') - self.dense1 = Dense(120, act=tl.ReLU, name='dense1relu', in_channels=4608) + self.dense1 = Dense(120, act=tl.ReLU, name='dense1relu', in_channels=512) self.dense2 = Dense(84, act=tl.ReLU, name='dense2relu', in_channels=120) self.dense3 = Dense(10, act=None, name='output', in_channels=84) @@ -127,8 +129,6 @@ def forward(self, x, label): for X_batch, y_batch in train_ds: X_batch = ms.Tensor(X_batch.numpy(), dtype=ms.float32) y_batch = ms.Tensor(y_batch.numpy(), dtype=ms.int32) - X_batch = tl.nhwc_to_nchw(X_batch) - y_batch = tl.nhwc_to_nchw(y_batch) output = net(X_batch) loss_output = criterion(output, y_batch) grads = train_network(X_batch, y_batch) @@ -142,25 +142,3 @@ def forward(self, x, label): print(" train acc: {}".format(train_acc / n_iter)) print(" loss ", loss) -# start_time = time.time() - -# train_loss, train_acc, n_iter = 0, 0, 0 -# for X_batch, y_batch in train_ds: -# net.set_train() - -# with tf.GradientTape() as tape: -# # compute outputs -# _logits = net(X_batch) -# # compute loss and update model -# _loss_ce = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') - -# grad = tape.gradient(_loss_ce, train_weights) -# optimizer.apply_gradients(zip(grad, train_weights)) - -# train_loss += _loss_ce -# train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) -# n_iter += 1 - -# print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) -# print(" train loss: {}".format(train_loss / n_iter)) -# print(" train acc: {}".format(train_acc / n_iter)) diff --git a/examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_TF_backend.py b/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py similarity index 98% rename from examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_TF_backend.py rename to examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py index f399bef22..cbd47e2d2 100644 --- a/examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_TF_backend.py +++ b/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- +# The tensorlayer and tensorflow operators can be mixed import time import numpy as np @@ -164,7 +165,7 @@ def _map_fn_test(img, target): # use training and evaluation sets to evaluate the model every print_freq epoch if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - net.eval() + net.set_eval() val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_ds: _logits = net(X_batch) # is_train=False, disable dropout @@ -175,7 +176,7 @@ def _map_fn_test(img, target): print(" val acc: {}".format(val_acc / n_iter)) # use testing data to evaluate the model -net.eval() +net.set_eval() test_loss, test_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_ds: _logits = net(X_batch) diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_dynamci_dragon.py b/examples/basic_tutorials/tutorial_mnist_mlp_dynamci_dragon.py deleted file mode 100644 index 9c06ec54c..000000000 
--- a/examples/basic_tutorials/tutorial_mnist_mlp_dynamci_dragon.py +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import os -os.environ['TL_BACKEND'] = 'dragon' - -from tensorlayer.layers import Module -from tensorlayer.layers import Dense -import tensorlayer as tl -import dragon as dg -import time -import argparse -import numpy as np - -X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) - - -class CustomModel(Module): - - def __init__(self): - super(CustomModel, self).__init__() - self.dense1 = Dense(n_units=800, act=tl.ReLU, in_channels=784) - self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800) - self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800) - - def forward(self, x, foo=None): - z = self.dense1(x) - z = self.dense2(z) - out = self.dense3(z) - return out - - -def parse_args(): - """Parse the arguments.""" - parser = argparse.ArgumentParser(description='Train a cifar10 resnet') - parser.add_argument('--execution', default='EAGER_MODE', type=str, help='The execution mode') - parser.add_argument('--seed', default=1337, type=int, help='The random seed') - parser.add_argument('--cuda', default=-1, type=int, help='The cuda device to use') - return parser.parse_args() - - -class Classifier(object): - """The base classifier class.""" - - # TensorSpec for graph execution - image_spec = dg.Tensor([None, 3, 32, 32], 'float32') - label_spec = dg.Tensor([None], 'int64') - - def __init__(self, optimizer): - super(Classifier, self).__init__() - self.net = CustomModel() - self.optimizer = optimizer - self.params = self.net.trainable_weights - - def step(self, image, label): - with dg.GradientTape() as tape: - logit = self.net(image) - # logit = dg.cast(logit, 'float64') - logit = dg.cast(dg.math.argmax(logit, -1), 'int64') - label = dg.cast(label, 'int64') - # print("logit :\n", logit, label) - # loss = dg.losses.smooth_l1_loss([logit, label]) - loss = dg.math.sum(logit - label) # dg.losses.sparse_softmax_cross_entropy([logit, label]) - accuracy = dg.math.mean(dg.math.equal([logit, label]).astype('float32')) - grads = tape.gradient(loss, self.params) - self.optimizer.apply_gradients(zip(self.params, grads)) - return loss, accuracy, self.optimizer - - -if __name__ == '__main__': - args = parse_args() - dg.logging.info('Called with args:\n' + str(args)) - - np.random.seed(args.seed) - dg.autograph.set_execution(args.execution) - dg.cuda.set_default_device(args.cuda) - - # Define the model - model = Classifier(dg.optimizers.SGD(base_lr=0.01, momentum=0.9, weight_decay=1e-4)) - - # Compile for graph execution if necessary - if args.execution == 'GRAPH_MODE': - model.step = dg.function( - func=model.step, - input_signature=[model.image_spec, model.label_spec], - ) - - # Main loop - import tensorflow as tf - batch_size = 200 - for i in range(50): - for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True): - image = dg.EagerTensor(X_batch, copy=False) - label = dg.EagerTensor(y_batch, copy=False, dtype='float32') - loss, accuracy, _ = model.step(image, label) - if i % 20 == 0: - dg.logging.info( - 'Iteration %d, lr = %s, loss = %.5f, accuracy = %.3f' % - (i, str(model.optimizer.base_lr), loss, accuracy) - ) diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_dynamic_MS_backend.py b/examples/basic_tutorials/tutorial_mnist_mlp_mindspore_backend.py similarity index 100% rename from examples/basic_tutorials/tutorial_mnist_mlp_dynamic_MS_backend.py rename to 
examples/basic_tutorials/tutorial_mnist_mlp_mindspore_backend.py diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_dynamic_TF_backend.py b/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py similarity index 98% rename from examples/basic_tutorials/tutorial_mnist_mlp_dynamic_TF_backend.py rename to examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py index 128739182..e3524e161 100644 --- a/examples/basic_tutorials/tutorial_mnist_mlp_dynamic_TF_backend.py +++ b/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- +# The tensorlayer and tensorflow operators can be mixed import numpy as np import time @@ -59,7 +60,6 @@ def forward(self, x, foo=None): ## use training and evaluation sets to evaluate the model every print_freq epoch if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - MLP.set_train() print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) train_loss, train_acc, n_iter = 0, 0, 0 for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False): @@ -80,7 +80,7 @@ def forward(self, x, foo=None): print(" val acc: {}".format(val_acc / n_iter)) ## use testing data to evaluate the model -MLP.eval() +MLP.set_eval() test_loss, test_acc, n_iter = 0, 0, 0 for X_batch, y_batch in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False): _logits = MLP(X_batch, foo=1) diff --git a/examples/basic_tutorials/tutorial_mnist_simple.py b/examples/basic_tutorials/tutorial_mnist_simple.py index 4d2bc7ccc..0506bd005 100644 --- a/examples/basic_tutorials/tutorial_mnist_simple.py +++ b/examples/basic_tutorials/tutorial_mnist_simple.py @@ -1,11 +1,12 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -import numpy as np +# The same set of code can switch the backend with one line import os os.environ['TL_BACKEND'] = 'tensorflow' # os.environ['TL_BACKEND'] = 'mindspore' +import numpy as np import tensorlayer as tl from tensorlayer.layers import Module from tensorlayer.layers import Dense, Dropout diff --git a/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py b/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py index 27ae9be8c..36b51dba4 100644 --- a/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py +++ b/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py @@ -189,7 +189,7 @@ def _map_fn_test(img, target): # use training and evaluation sets to evaluate the model every print_freq epoch if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - net.eval() + net.set_eval() val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_ds: _logits = net(X_batch) # is_train=False, disable dropout @@ -200,7 +200,7 @@ def _map_fn_test(img, target): print(" val acc: {}".format(val_acc / n_iter)) # use testing data to evaluate the model -net.eval() +net.set_eval() test_loss, test_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_ds: _logits = net(X_batch) diff --git a/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py b/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py index ce02d34bd..224b40c56 100644 --- a/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py +++ b/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py @@ -40,4 +40,7 @@ def forward(self, x): optimizer = tl.optimizers.Adam(learning_rate=0.001) metric = tl.metric.Accuracy() model = tl.models.Model(network=net, loss_fn=tl.cost.cross_entropy, optimizer=optimizer, metrics=metric) -model.train(n_epoch=20, 
train_dataset=train_loader, print_freq=5, print_train_batch=True) +model.train(n_epoch=2, train_dataset=train_loader, print_freq=5, print_train_batch=True) +model.save_weights('./model_mlp.npz', format='npz_dict') +model.load_weights('./model_mlp.npz', format='npz_dict') +# model.eval(train_loader) \ No newline at end of file diff --git a/tensorlayer/backend/ops/__init__.py b/tensorlayer/backend/ops/__init__.py index 96277aefa..1cef00995 100644 --- a/tensorlayer/backend/ops/__init__.py +++ b/tensorlayer/backend/ops/__init__.py @@ -33,6 +33,8 @@ from .load_backend import GroupConv2D from .load_backend import BinaryConv2D from .load_backend import DorefaConv2D +from .load_backend import MaxPool1d +from .load_backend import AvgPool1d from .load_backend import ReLU from .load_backend import ReLU6 @@ -58,6 +60,8 @@ from .load_backend import AdaptiveMaxPool1D from .load_backend import AdaptiveMaxPool2D from .load_backend import AdaptiveMaxPool3D +from .load_backend import Floor +from .load_backend import Ceil # load ops from .load_backend import Variable @@ -112,10 +116,10 @@ from .load_backend import L2Normalize from .load_backend import EmbeddingLookup from .load_backend import NCELoss -from .load_backend import Not_equal +from .load_backend import NotEqual from .load_backend import Cast from .load_backend import ExpandDims -from .load_backend import Count_nonzero +from .load_backend import CountNonzero from .load_backend import FlattenReshape from .load_backend import Transpose from .load_backend import MatMul diff --git a/tensorlayer/backend/ops/dragon_backend.py b/tensorlayer/backend/ops/dragon_backend.py deleted file mode 100644 index e62f27e84..000000000 --- a/tensorlayer/backend/ops/dragon_backend.py +++ /dev/null @@ -1,1049 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -from __future__ import absolute_import, division, print_function - -import numpy as np -import dragon as D - -from dragon.core.eager import context -from dragon.core.ops import init_ops -from dragon.core.ops import vision_ops - -_dtypeDict = ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] -# TODO NotImplemented -DType = None -float16 = 'float16' -float32 = 'float32' -float64 = 'float64' -int8 = 'int8' -int16 = 'int16' -int32 = 'int32' -int64 = 'int64' -uint8 = 'uint8' -uint16 = 'uint16' -uint32 = 'uint32' -uint64 = 'uint64' - -# isinstance input output -# TODO NotImplemented -# TensorLike = None - - -def _getter(init_fn, **kwargs): - """Return an named eager tensor.""" - with context.eager_mode(): - value = init_fn(**kwargs) - value._name = kwargs.get('name', value.id) - return value - - -def set_context(**kwargs): - raise Exception("Using Dragon backend,You don't need to set context") - - -def get_tensor_shape(x): - return x.shape - - -# initializers -def zeros(shape, dtype='float32'): - """ - Creates a tensor with all elements set to zero. - - Parameters - ---------- - shape : A list of integers - a tuple of integers, or a 1-D Tensor of type int32. - dtype : tensor - The DType of an element in the resulting Tensor - - Returns - ------- - A Tensor with all elements set to zero. - - """ - return _getter( - init_ops.fill, - value=0, - shape=shape, - dtype=dtype, - ) - - -def ones(shape, dtype='float32'): - """ - Creates a tensor with all elements set to ones. - - Parameters - ---------- - shape : A list of integers - a tuple of integers, or a 1-D Tensor of type int32. 
- dtype : tensor - The DType of an element in the resulting Tensor - - Returns - ------- - A Tensor with all elements set to zero. - - """ - return _getter( - init_ops.fill, - value=1, - shape=shape, - dtype=dtype, - ) - - -def constant(value, shape, dtype='float32'): - """ - Creates a constant tensor from a tensor-like object. - - Parameters - ---------- - value : list - A constant value (or list) of output type dtype. - dtype : tensor - The type of the elements of the resulting tensor. - shape : tuple - Optional dimensions of resulting tensor. - - Returns - ------- - A Constant Tensor. - - """ - # shape = shape[::-1] - return _getter( - init_ops.fill, - value=value, - shape=shape, - dtype=dtype, - ) - - -def random_uniform(shape, minval=0, maxval=None, dtype='float32', seed=None): - """ - Outputs random values from a uniform distribution. - - Parameters - ---------- - shape : tuple - A 1-D integer Tensor or Python array. The shape of the output tensor. - minval : int - The lower bound on the range of random values to generate (inclusive). Defaults to 0. - maxval : int - The upper bound on the range of random values to generate (exclusive). Defaults to 1 if dtype is floating point. - dtype : tensor - The type of the output: float16, float32, float64, int32, or int64. - seed : int - Used in combination with dragon.random.set_seed to create a reproducible sequence of tensors across multiple calls. - Returns - ------- - A tensor of the specified shape filled with random uniform values. - - """ - return _getter(init_ops.random_uniform, low=minval, high=maxval, shape=shape, dtype=dtype) - - -def random_normal(shape, mean=0.0, stddev=1.0, dtype='float32', seed=None): - """ - Outputs random values from a normal distribution. - - Parameters - ---------- - shape : tuple - A 1-D integer Tensor or Python array. The shape of the output tensor. - mean : float - The mean of the normal distribution - stddev : float - The standard deviation of the normal distribution. - dtype : tensor - The type of the output. - seed : A Python integer - Used to create a random seed for the distribution - - Returns - ------- - A tensor of the specified shape filled with random normal values. - - """ - return _getter( - init_ops.random_normal, - mean=mean, - std=stddev, - shape=shape, - dtype=dtype, - ) - - -def truncated_normal(shape, mean=0.0, stddev=1.0, dtype='float32', seed=None): - """ - Outputs random values from a truncated normal distribution. - - Parameters - ---------- - shape : tuple - A 1-D integer Tensor or Python array. The shape of the output tensor. - mean : float - The mean of the normal distribution - stddev : float - The standard deviation of the normal distribution. - dtype : tensor - The type of the output. - seed : A Python integer - Used to create a random seed for the distribution - - Returns - ------- - A tensor of the specified shape filled with random truncated normal values. - - """ - return _getter( - init_ops.truncated_normal, - mean=mean, - std=stddev, - shape=shape, - dtype=dtype, - ) - - -def he_normal(shape, dtype, seed=None): - """ - He normal initializer. - - Parameters - ---------- - seed : A Python integer. - Used to seed the random generator. - shape : tuple - A 1-D integer Tensor or Python array. The shape of the output tensor. - dtype : tensor - The type of the output. - - Returns - ------- - A tensor of the specified shape filled with he normal values. 
- """ - # shape = shape[::-1] - raise NotImplementedError("He_Normal is not implemented") - - -def Variable(initial_value, name, trainable=None): - """ - Creates a new variable with value initial_value. - - Parameters - ---------- - initial_value : tensor - A Tensor, or Python object convertible to a Tensor - name : str - Optional name for the variable. Defaults to 'Variable' and gets uniquified automatically. - Returns - ------- - Variable - """ - return D.Tensor(name=name, shape=initial_value) - - -class MatMul(object): - - def __init__(self): - pass - - def __call__(self, a, b): - inputs = [a, b] - return D.math.matmul(inputs) - - -def matmul(a, b): - """ - Multiplies matrix a by matrix b, producing a * b. - - Parameters - ---------- - a : tensor - type float16, float32, float64, int32, complex64, complex128 and rank > 1. - b : tensor - with same type and rank as a. - - Returns - ------- - A Tensor of the same type as a and b - """ - inputs = [a, b] - return D.math.matmul(inputs) - - -def add(value, bias): - """ - Returns x + y element-wise. - - Parameters - ---------- - value : tensor. - Must be one of the following types: bfloat16, half, float32, float64, - uint8, int8, int16, int32, int64, complex64, complex128, string. - bias : tensor - Must have the same type as a - name : str - A name for the operation - - Returns - ------- - A Tensor. Has the same type as a. - """ - - inputs = [value, bias] - return D.math.add(inputs) - - -def dtypes(dt): - """ - Data dtypes. - - Parameters - ---------- - dt : string - It could be 'uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16', - 'int32', 'int64', 'float16', 'float32', 'float64', 'DType'. - - Returns - ------- - Data dtypes - """ - if dt not in _dtypeDict: - raise Exception("Unsupported dtype: {}".format(dt)) - return dt - - -def minimum(x, y): - """ - Returns the min of x and y (i.e. x < y ? x : y) element-wise. - - Parameters - ---------- - x : tensor. - Must be one of the following types: bfloat16, half, float32, float64, int32, int64. - y : A Tensor. - Must have the same type as x. - name : str - A name for the operation (optional). - - Returns - ------- - A Tensor. Has the same type as x - """ - inputs = [x, y] - return D.math.minimum(inputs) - - -class FlattenReshape(object): - - def __init__(self): - pass - - def __call__(self, inputs): - dim = 1 - for d in get_tensor_shape(inputs)[1:]: - dim *= d - return D.reshape(inputs, [-1, dim]) - - -class Reshape(object): - - def __init__(self, shape): - self.shape = shape - - def __call__(self, tensor): - return D.reshape(tensor, shape=self.shape) - - -def reshape(tensor, shape): - """ - Reshapes a tensor. - - Parameters - ---------- - tensor : tensor - A Tensor. - shape : tensor - Defines the shape of the output tensor. - Returns - ------- - A Tensor. Has the same type as tensor - """ - return D.reshape(tensor, shape=shape) - - -class Concat(object): - - def __init__(self, axis): - super(Concat, self).__init__() - self.axis = axis - - def __call__(self, values): - return D.concat(values=values, axis=self.axis) - - -def concat(values, axis): - """ - Concatenates tensors along one dimension. - - Parameters - ---------- - values : list - A list of Tensor objects or a single Tensor - axis : int - 0-D int32 Tensor. Dimension along which to concatenate - Returns - ------- - A Tensor resulting from concatenation of the input tensors. - """ - return D.concat(values, axis=axis) - - -def convert_to_tensor(value, dtype=None): - """ - Converts the given value to a Tensor. 
- - Parameters - ---------- - value : object - An object whose type has a registered Tensor conversion function. - dtype : optional - Optional element type for the returned tensor. If missing, the type is inferred from the type of value. - - Returns - ------- - A Tensor based on value. - """ - return D.Tensor.convert_to(value, dtype) - - -def sqrt(x): - """ - Computes square root of x element-wise. - - Parameters - ---------- - x : tensor - Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128. - - Returns - ------- - A Tensor. Has the same type as x. - """ - return D.math.sqrt(x) - - -class ReduceSum(object): - - def __init__(self, axis): - pass - - def construct(self, input): - pass - - -class ReduceMean(object): - - def __init__(self, axis): - if axis == [1, 2]: - self.data_format = 'NHWC' - elif axis == [2, 3]: - self.data_format = 'NCHW' - else: - raise ("`data_format` should have one of the following values: [`channels_last`, `channels_first`]") - - def __call__(self, inputs): - return vision_ops.pool2d( - inputs, - kernel_shape=1, - strides=1, - pads=0, - mode='AVG', - global_pooling=True, - data_format=self.data_format, - ) - - -def reduce_mean(input_tensor, axis=None): - """ - Computes the mean of elements across dimensions of a tensor. - - Parameters - ---------- - input_tensor : tensor - The tensor to reduce. Should have numeric type. - axis : int - The dimensions to reduce. If None (the default), reduces all dimensions. - Must be in the range [-rank(input_tensor), rank(input_tensor)). - name : str - A name for the operation (optional). - - Returns - ------- - The reduced tensor. - """ - - return D.mean(input_tensor, axes=axis) - - -class ReduceMax(object): - - def __init__(self, axis): - if axis == [1, 2]: - self.data_format = 'NHWC' - elif axis == [2, 3]: - self.data_format = 'NCHW' - else: - raise ("`data_format` should have one of the following values: [`channels_last`, `channels_first`]") - - def __call__(self, inputs): - return vision_ops.pool2d( - inputs, kernel_shape=1, strides=1, pads=0, mode='MAX', global_pooling=True, data_format=self.data_format - ) - - -def reduce_max(input_tensor, axis=None): - """ - Computes the maximum of elements across dimensions of a tensor. - - Parameters - ---------- - input_tensor : tensor - The tensor to reduce. Should have real numeric type. - axis : int - The dimensions to reduce. If None (the default), reduces all dimensions. - Must be in the range [-rank(input_tensor), rank(input_tensor)). - name : str - A name for the operation (optional). - - Returns - ------- - The reduced tensor. - """ - - return D.max(input_tensor, axis) - - -def reduce_min(input_tensor, axis=None): - """ - Computes the minimum of elements across dimensions of a tensor. - - Parameters - ---------- - input_tensor : tensor - The tensor to reduce. Should have real numeric type. - axis : int - The dimensions to reduce. If None (the default), reduces all dimensions. - Must be in the range [-rank(input_tensor), rank(input_tensor)). - name : str - A name for the operation (optional). - - Returns - ------- - The reduced tensor. 
- """ - return D.min(input_tensor, axis) - - -class Pad(object): - - def __init__(self, paddings, mode="REFLECT"): - if mode not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']: - raise Exception("Unsupported mode: {}".format(mode)) - if mode == 'SYMMETRIC': - mode = 'EDGE' - self.paddings = paddings - self.mode = mode - - def __call__(self, x): - outputs = D.pad(x, pads=self.paddings, mode=self.mode, value=0) - return outputs - - -def pad(tensor, paddings, mode='CONSTANT', constant_values=0): - """ - Pads a tensor. - - Parameters - ---------- - tensor : tensor - A Tensor. - paddings : tuple - A tuple of type int32. - mode : str - One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive) - constant_values : int - In "CONSTANT" mode, the scalar pad value to use. Must be same type as tensor. - - Returns - ------- - A Tensor. Has the same type as tensor. - """ - if mode not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']: - raise Exception("Unsupported mode: {}".format(mode)) - if mode == 'SYMMETRIC': - mode = 'EDGE' - outputs = D.pad(tensor, pads=paddings, mode=mode, value=constant_values) - return outputs - - -class Unstack(object): - - def __init__(self, axis, num=None): - self.axis = axis - self.num = num - - def __call__(self, values): - raise NotImplementedError - - -class Stack(object): - - def __init__(self, axis): - self.axis = axis - - def __call__(self, values): - return D.stack(values, axis=self.axis) - - -def stack(values, axis=0): - """ - Stacks a list of rank-R tensors into one rank-(R+1) tensor. - - Parameters - ---------- - values : list - A list of Tensor objects with the same shape and type. - axis : int - An int. The axis to stack along. Defaults to the first dimension. - Negative values wrap around, so the valid range is [-(R+1), R+1). - - Returns - ------- - A stacked Tensor with the same type as values. - """ - return D.stack(values, axis=axis) - - -class Meshgrid(object): - - def __init__(self, indexing='xy'): - super(Meshgrid, self).__init__() - self.index = indexing - - def __call__(self, inputs): - pass - - -def meshgrid(x, y): - """ - Broadcasts parameters for evaluation on an N-D grid. - - Parameters - ---------- - x : tensor - Tensors with rank 1. - y : tensor - Tensors with rank 1. - - Returns - ------- - A list of N Tensors with rank N. - """ - - pass - - -def range(start, limit=None, delta=1, dtype=None): - """ - Creates a sequence of numbers. - - Parameters - ---------- - start : tensor - A 0-D Tensor (scalar). Acts as first entry in the range if limit is not None; - otherwise, acts as range limit and first entry defaults to 0. - limit : tensor - A 0-D Tensor (scalar). Upper limit of sequence, exclusive. If None, - defaults to the value of start while the first entry of the range defaults to 0. - delta : tensor - A 0-D Tensor (scalar). Number that increments start. Defaults to 1. - dtype : type - The type of the elements of the resulting tensor. - - Returns - ------- - An 1-D Tensor of type dtype. - """ - if dtype is None: - dtype = 'int32' - if limit is None: - outputs = D.arange(start=0, stop=start, step=delta, dtype=dtype) - else: - outputs = D.arange(start, stop=limit, step=delta, dtype=dtype) - return outputs - - -class ExpandDims(object): - - def __init__(self, axis): - pass - - def construct(self, input): - pass - - -def expand_dims(input, axis): - """ - Inserts a dimension of 1 into a tensor's shape. - - Parameters - ---------- - input : tensor - A Tensor. - axis : int - 0-D (scalar). Specifies the dimension index at which to expand the shape of input. 
Must be in the range [-rank(input) - 1, rank(input)].
-
-    Returns
-    -------
-    A Tensor with the same data as input, but its shape has an additional dimension of size 1 added.
-    """
-
-    return D.expand_dims(input, axis=axis)
-
-
-class Tile(object):
-
-    def __init__(self):
-        pass
-
-    def __call__(self, input, multiples):
-        return D.tile(input, multiples)
-
-
-def tile(input, multiples):
-    """
-    Constructs a tensor by tiling a given tensor.
-
-    Parameters
-    ----------
-    input : tensor
-        A Tensor. 1-D or higher.
-    multiples : tensor
-        Must be one of the following types: int32, int64. 1-D.
-        Length must be the same as the number of dimensions in input
-
-    Returns
-    -------
-    A Tensor. Has the same type as input.
-    """
-    return D.tile(input, multiples)
-
-
-class Cast(object):
-
-    def __init__(self, dtype):
-        pass
-
-    def __call__(self, input):
-        pass
-
-
-def cast(x, dtype):
-    """
-    Casts a tensor to a new type.
-
-    Parameters
-    ----------
-    x : tensor
-        A Tensor or SparseTensor or IndexedSlices of numeric type.
-        It could be uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64.
-    dtype : dtype
-        The destination type. The list of supported dtypes is the same as x
-
-    Returns
-    -------
-    A Tensor or SparseTensor or IndexedSlices with same shape as x and same type as dtype.
-    """
-    return D.cast(x, dtype=dtype)
-
-
-class Transpose(object):
-
-    def __init__(self, perm, conjugate=False):
-        self.perm = perm
-        if conjugate:
-            raise NotImplementedError("The `conjugate` parameter is not supported.")
-
-    def __call__(self, a):
-        return D.transpose(a, self.perm)
-
-
-def transpose(a, perm=None, conjugate=False):
-    """
-    Transposes a.
-
-    Parameters
-    ----------
-    a : tensor
-        A Tensor.
-    perm : list
-        A permutation of the dimensions of a.
-    conjugate : bool
-        Setting it to True is mathematically equivalent to conj(transpose(input)).
-
-    Returns
-    -------
-    A transposed Tensor.
-    """
-
-    # `conjugate` is ignored; the Dragon backend transposes without conjugation.
-    return D.transpose(a, perm=perm)
-
-
-def gather_nd(params, indices, batch_dims=0):
-    """
-    Gather slices from params into a Tensor with shape specified by indices.
-
-    Parameters
-    ----------
-    params : tensor
-        The tensor from which to gather values.
-    indices : tensor
-        Must be one of the following types: int32, int64. Index tensor.
-    batch_dims : int
-        An integer or a scalar 'Tensor'. The number of batch dimensions.
-
-    Returns
-    -------
-    A Tensor. Has the same type as params.
-    """
-
-    pass
-
-
-def clip_by_value(t, clip_value_min, clip_value_max):
-    """
-    Clips tensor values to a specified min and max.
-
-    Parameters
-    ----------
-    t : tensor
-        A Tensor or IndexedSlices
-    clip_value_min : tensor
-        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The minimum value to clip by
-    clip_value_max : tensor
-        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The maximum value to clip by
-
-    Returns
-    -------
-    A clipped Tensor or IndexedSlices.
-    """
-
-    pass
-
-
-def split(value, num_or_size_splits, axis=0, num=None):
-    """
-    Splits a tensor into sub tensors.
-
-    Parameters
-    ----------
-    value : tensor
-        The Tensor to split.
-    num_or_size_splits : list
-        Either an integer indicating the number of splits along split_dim or a 1-D integer Tensor or
-        Python list containing the sizes of each output tensor along split_dim.
-    axis : int
-        The dimension along which to split. Must be in the range [-rank(value), rank(value)). Defaults to 0.
-    num : int
-        Used to specify the number of outputs when it cannot be inferred from the shape of size_splits.
-
-    Returns
-    -------
-    Tensor objects resulting from splitting value.
-    """
-    pass
-
-
-def floor(x):
-    return D.math.floor(x)
-
-
-def gather(params, indices):
-    raise NotImplementedError
-
-
-def linspace(start, stop, num):
-    return D.linspace(start, stop, num)
-
-
-def slice(inputs, starts, sizes):
-    return D.slice(inputs, starts, sizes)
-
-
-def add_n(inputs):
-    raise NotImplementedError
-
-
-class OneHot(object):
-
-    def __init__(self, axis=-1, depth=1, on_value=1.0, off_value=0.0, dtype='float32'):
-        self.depth = depth
-        self.dtype = dtype
-
-    def __call__(self, indices):
-        outputs = np.zeros(shape=(indices.shape[0], self.depth))
-        for i in np.arange(indices.shape[0]):
-            outputs[int(i)][int(indices[int(i)].get_value())] = 1
-        outputs = D.constant(outputs, dtype=self.dtype)
-        return outputs
-
-
-class L2Normalize(object):
-
-    def __init__(self, axis=None, epsilon=1e-12):
-        super(L2Normalize, self).__init__()
-        pass
-
-    def __call__(self, input, *args, **kwargs):
-        pass
-
-
-class EmbeddingLookup(object):
-
-    def __init__(self, max_norm=None):
-        self.max_norm = max_norm
-
-    def __call__(self, params, ids, *args, **kwargs):
-        pass
-
-
-class NCELoss(object):
-
-    def __init__(self, num_true=1, sampled_values=None, remove_accidental_hits=False):
-        super(NCELoss, self).__init__()
-
-    def __call__(self, weights, biases, labels, inputs, num_sampled, num_classes):
-        pass
-
-
-class Not_equal(object):
-
-    def __init__(self):
-        pass
-
-    def __call__(self, x, y):
-        pass
-
-
-class Count_nonzero(object):
-
-    def __init__(self, keepdims=None, dtype='int64'):
-        pass
-
-    def __call__(self, *args, **kwargs):
-        pass
-
-
-class Resize:
-
-    def __init__(self, scale, method, antialias=False, data_format='channels_last', ksize=None):
-        if method not in ['nearest', 'linear', 'bilinear']:
-            raise ValueError('Current resize does not support this method.')
-        if method == 'bilinear':
-            method = 'linear'
-        self.method = method
-        self.antialias = antialias
-        self.scale = scale
-        if data_format != 'channels_last':
-            raise Exception("UpSampling2d resize_images only supports channels_last")
-
-    def __call__(self, inputs):
-        output_size = (int(inputs.shape[1] * self.scale[0]), int(inputs.shape[2] * self.scale[1]))
-        outputs = D.vision.resize(inputs, sizes=output_size, mode=self.method, align_corners=self.antialias)
-        return outputs
-
-
-def resize(inputs, output_size, method, antialias):
-    if method not in ['nearest', 'linear', 'bilinear']:
-        raise ValueError('Current resize does not support this method.')
-    if method == 'bilinear':
-        method = 'linear'
-    return D.vision.resize(inputs, sizes=output_size, mode=method, align_corners=antialias)
-
-
-class ZeroPadding1D(object):
-
-    def __init__(self):
-        pass
-
-    def __call__(self, padding):
-        raise NotImplementedError
-
-
-class ZeroPadding2D(object):
-
-    def __init__(self):
-        pass
-
-    def __call__(self, padding):
-        raise NotImplementedError
-
-
-class ZeroPadding3D(object):
-
-    def __init__(self):
-        pass
-
-    def __call__(self, padding):
-        raise NotImplementedError
-
-
-class Sign(object):
-
-    def __init__(self):
-        pass
-
-    def __call__(self, x):
-        return D.math.sign(x)
-
-
-def ceil(x):
-    raise NotImplementedError
-
-
-def multiply(x, y):
-    raise NotImplementedError
-
-
-def divide(x, y):
-    raise NotImplementedError
-
-
-def identity(x):
-    raise NotImplementedError
-
-
-class BatchToSpace(object):
-
-    def __init__(self, block_size, crops):
-        super(BatchToSpace, self).__init__()
-        pass
-
-    def __call__(self, input_x):
-        raise NotImplementedError
-
-
-class DepthToSpace(object):
-
-    def 
__init__(self, block_size, data_format='NHWC'): - pass - - def __call__(self, input): - raise NotImplementedError diff --git a/tensorlayer/backend/ops/dragon_nn.py b/tensorlayer/backend/ops/dragon_nn.py deleted file mode 100644 index e6b5105ef..000000000 --- a/tensorlayer/backend/ops/dragon_nn.py +++ /dev/null @@ -1,910 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- -import dragon as D -from dragon.core.ops import vision_ops -from dragon.core.ops import activation_ops - - -def padding_format(padding): - """ - Checks that the padding format correspond format. - - Parameters - ---------- - padding : str - Must be one of the following:"same", "SAME", "VALID", "valid" - - Returns - ------- - str "SAME" or "VALID" - """ - - if padding in ["SAME", "same"]: - padding = "SAME" - elif padding in ["VALID", "valid"]: - padding = "VALID" - elif padding == None: - padding = None - else: - raise Exception("Unsupported padding: " + str(padding)) - return padding - - -def preprocess_1d_format(data_format, padding): - """ - Checks that the 1-D dataformat format correspond format. - - Parameters - ---------- - data_format : str - Must be one of the following:"channels_last","NWC","NCW","channels_first" - padding : str - Must be one of the following:"same","valid","SAME","VALID" - - Returns - ------- - str "NWC" or "NCW" and "SAME" or "VALID" - """ - - if data_format in ["channels_last", "NWC"]: - data_format = "NWC" - elif data_format in ["channels_first", "NCW"]: - data_format = "NCW" - elif data_format == None: - data_format = None - else: - raise Exception("Unsupported data format: " + str(data_format)) - padding = padding_format(padding) - return data_format, padding - - -def preprocess_2d_format(data_format, padding): - """ - Checks that the 2-D dataformat format correspond format. - - Parameters - ---------- - data_format : str - Must be one of the following:"channels_last","NHWC","NCHW","channels_first" - padding : str - Must be one of the following:"same","valid","SAME","VALID" - - Returns - ------- - str "NHWC" or "NCHW" and "SAME" or "VALID" - """ - - if data_format in ["channels_last", "NHWC", "nhwc"]: - data_format = "NHWC" - elif data_format in ["channels_first", "NCHW", "nchw"]: - data_format = "NCHW" - elif data_format == None: - data_format = None - else: - raise Exception("Unsupported data format: " + str(data_format)) - padding = padding_format(padding) - return data_format, padding - - -def preprocess_3d_format(data_format, padding): - """ - Checks that the 3-D dataformat format correspond format. 
-
-    Parameters
-    ----------
-    data_format : str
-        Must be one of the following:"channels_last","NDHWC","NCDHW","channels_first"
-    padding : str
-        Must be one of the following:"same","valid","SAME","VALID"
-
-    Returns
-    -------
-    str "NDHWC" or "NCDHW" and "SAME" or "VALID"
-    """
-
-    if data_format in ['channels_last', 'NDHWC']:
-        data_format = 'NDHWC'
-    elif data_format in ['channels_first', 'NCDHW']:
-        data_format = 'NCDHW'
-    elif data_format == None:
-        data_format = None
-    else:
-        raise Exception("Unsupported data format: " + str(data_format))
-    padding = padding_format(padding)
-    return data_format, padding
-
-
-def nchw_to_nhwc(x):
-    """
-    Channels first to channels last
-
-    Parameters
-    ----------
-    x : tensor
-        channels first tensor data
-
-    Returns
-    -------
-    channels last tensor data
-    """
-
-    pass
-
-
-def nhwc_to_nchw(x):
-    """
-    Channels last to channels first
-
-    Parameters
-    ----------
-    x : tensor
-        channels last tensor data
-
-    Returns
-    -------
-    channels first tensor data
-    """
-
-    pass
-
-
-class ReLU(object):
-
-    def __init__(self):
-        pass
-
-    def __call__(self, x):
-        return D.nn.relu(x)
-
-
-def relu(x):
-    """
-    Computes rectified linear: max(features, 0).
-
-    Parameters
-    ----------
-    x : tensor
-        Must be one of the following types: float32, float64, int32, uint8, int16,
-        int8, int64, bfloat16, uint16, half, uint32, uint64, qint8.
-
-    Returns
-    -------
-    A Tensor. Has the same type as features.
-    """
-    return D.nn.relu(x)
-
-
-class ReLU6(object):
-
-    def __init__(self):
-        pass
-
-    def __call__(self, x):
-        return D.nn.relu6(x)
-
-
-def relu6(x):
-    """
-    Computes Rectified Linear 6: min(max(features, 0), 6).
-
-    Parameters
-    ----------
-    x : tensor
-        Must be one of the following types: float32, float64, int32, uint8, int16,
-        int8, int64, bfloat16, uint16, half, uint32, uint64, qint8.
-
-    Returns
-    -------
-    A Tensor with the same type as features.
-    """
-    return D.nn.relu6(x)
-
-
-class LeakyReLU(object):
-
-    def __init__(self, alpha=0.2):
-        self.alpha = alpha
-
-    def __call__(self, x):
-        return D.nn.leaky_relu(x, alpha=self.alpha)
-
-
-def leaky_relu(x):
-    """
-    Compute the Leaky ReLU activation function.
-
-    Parameters
-    ----------
-    x : tensor
-        representing preactivation values. Must be one of the following types:
-        float16, float32, float64, int32, int64.
-
-    Returns
-    -------
-    The activation value.
-    """
-
-    return D.nn.leaky_relu(x)
-
-
-class Softplus(object):
-
-    def __init__(self):
-        pass
-
-    def __call__(self, x):
-        raise NotImplementedError
-
-
-def softplus(x):
-    """
-    Computes softplus: log(exp(features) + 1).
-
-    Parameters
-    ----------
-    x : tensor
-        Must be one of the following types: half, bfloat16, float32, float64.
-
-    Returns
-    -------
-    A Tensor. Has the same type as features.
-    """
-
-    raise NotImplementedError
-
-
-class Tanh(object):
-
-    def __init__(self):
-        pass
-
-    def __call__(self, x):
-        return activation_ops.tanh(x)
-
-
-def tanh(x):
-    """
-    Computes hyperbolic tangent of x element-wise.
-
-    Parameters
-    ----------
-    x : tensor
-        Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128.
-
-    Returns
-    -------
-    A Tensor. Has the same type as x.
-    """
-
-    return activation_ops.tanh(x)
-
-
-class Sigmoid(object):
-
-    def __init__(self):
-        pass
-
-    def __call__(self, x):
-        return activation_ops.sigmoid(x)
-
-
-def sigmoid(x):
-    """
-    Computes sigmoid of x element-wise.
-
-    Parameters
-    ----------
-    x : tensor
-        A Tensor with type float16, float32, float64, complex64, or complex128.
- - Returns - ------- - A Tensor with the same type as x. - """ - return activation_ops.sigmoid(x) - - -class Softmax(object): - - def __init__(self): - pass - - def __call__(self, x): - return D.nn.softmax(x) - - -def softmax(logits, axis=None): - """ - Computes softmax activations. - - Parameters - ---------- - logits : tensor - Must be one of the following types: half, float32, float64. - axis : int - The dimension softmax would be performed on. The default is -1 which indicates the last dimension. - - Returns - ------- - A Tensor. Has the same type and shape as logits. - """ - return D.nn.softmax(logits) - - -class Dropout(object): - - def __init__(self, keep, seed=1): - self.keep = 1 - keep - self.seed = seed - - def __call__(self, inputs): - return D.nn.dropout(inputs, prob=self.keep) - - -class BiasAdd(object): - """ - Adds bias to value. - - Parameters - ---------- - x : tensor - A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128. - bias : tensor - Must be the same type as value unless value is a quantized type, - in which case a different quantized type may be used. - Returns - ------- - A Tensor with the same type as value. - """ - - def __init__(self, data_format='NHWC'): - self.data_format = data_format - - def __call__(self, x, bias): - inputs = [x, bias] - return vision_ops.bias_add(inputs, data_format=self.data_format) - - -def bias_add(x, bias): - """ - Adds bias to value. - - Parameters - ---------- - x : tensor - A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128. - bias : tensor - Must be the same type as value unless value is a quantized type, - in which case a different quantized type may be used. - data_format : A string. - 'N...C' and 'NC...' are supported. - name : str - A name for the operation (optional). - Returns - ------- - A Tensor with the same type as value. - """ - inputs = [x, bias] - return vision_ops.bias_add(inputs, data_format='NHWC') - - -class Conv1D(object): - pass - # raise NotImplementedError - - -def conv1d(input, filters, stride, padding, data_format='NWC', dilations=None, name=None): - """ - Computes a 1-D convolution given 3-D input and filter tensors. - - Parameters - ---------- - input : tensor - A 3D Tensor. Must be of type float16, float32, or float64 - filters : tensor - A 3D Tensor. Must have the same type as input. - stride : int of list - An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step. - padding : string - 'SAME' or 'VALID' - data_format : string - An optional string from "NWC", "NCW". Defaults to "NWC", the data is stored in the order of - [batch, in_width, in_channels]. The "NCW" format stores data as [batch, in_channels, in_width]. - dilations : int or list - An int or list of ints that has length 1 or 3 which defaults to 1. - The dilation factor for each dimension of input. If set to k > 1, - there will be k-1 skipped cells between each filter element on that dimension. - Dilations in the batch and depth dimensions must be 1. - name : string - A name for the operation (optional). - Returns - ------- - A Tensor. Has the same type as input. 
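-
-    Examples
-    --------
-    This op is left as a stub in the Dragon backend, so the following only
-    sketches the intended call signature (hypothetical tensors ``x`` and ``w``):
-
-    >>> # x: [batch, in_width, in_channels], w: [filter_width, in_channels, out_channels]
-    >>> y = conv1d(x, w, stride=1, padding='SAME', data_format='NWC')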
- """ - - pass - - -class Conv2D(object): - - def __init__(self, strides, padding, data_format='NHWC', dilations=None, out_channel=None, k_size=None): - self.data_format, self.padding = preprocess_2d_format(data_format, padding) - self.ksize = k_size[0] - if self.data_format is 'NHWC': - self.dg_stride = strides[1] - self.dg_dilation = dilations[1] - elif self.data_format is 'NCHW': - self.dg_stride = strides[2] - self.dg_dilation = dilations[2] - - def __call__(self, inputs, filters): - outputs = vision_ops.conv2d( - [inputs, filters], - kernel_shape=self.ksize, - strides=self.dg_stride, - padding=self.padding, - dilations=self.dg_dilation, - data_format=self.data_format, - ) - return outputs - - -def conv2d(input, filters, strides, padding, data_format='NCHW', dilations=None): - """ - Computes a 2-D convolution given 4-D input and filters tensors. - - Parameters - ---------- - input : tensor - Must be one of the following types: half, bfloat16, float32, float64. A 4-D tensor. - The dimension order is interpreted according to the value of data_format, see below for details. - filters : tensor - Must have the same type as input. A 4-D tensor of shape [filter_height, filter_width, in_channels, out_channels] - strides : int of list - The stride of the sliding window for each dimension of input. If a single value is given it is replicated in the H and W dimension. - By default the N and C dimensions are set to 1. The dimension order is determined by the value of data_format, see below for details. - padding : string - "SAME" or "VALID" - data_format : string - "NHWC", "NCHW". Defaults to "NCHW". - dilations : list or ints - list of ints that has length 1, 2 or 4, defaults to 1. The dilation factor for each dimension ofinput. - - Returns - ------- - A Tensor. Has the same type as input. - """ - raise NotImplementedError - - -class Conv3D(object): - pass - # raise NotImplementedError - - -def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None, name=None): - """ - Computes a 3-D convolution given 5-D input and filters tensors. - - Parameters - ---------- - input : tensor - Must be one of the following types: half, bfloat16, float32, float64. - Shape [batch, in_depth, in_height, in_width, in_channels]. - filters : tensor - Must have the same type as input. Shape [filter_depth, filter_height, filter_width, in_channels, out_channels]. - in_channels must match between input and filters. - strides : list of ints - A list of ints that has length >= 5. 1-D tensor of length 5. - The stride of the sliding window for each dimension of input. - Must have strides[0] = strides[4] = 1. - padding : string - A string from: "SAME", "VALID". The type of padding algorithm to use. - data_format : string - An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data. - With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. - Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. - dilations : list of ints - Defaults to [1, 1, 1, 1, 1]. 1-D tensor of length 5. The dilation factor for each dimension of input. - If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. - The dimension order is determined by the value of data_format, see above for details. - Dilations in the batch and depth dimensions must be 1. - name : string - A name for the operation (optional). 
- - Returns - ------- - A Tensor. Has the same type as input. - """ - - raise NotImplementedError - - -def lrn(inputs, depth_radius, bias, alpha, beta): - """ - Local Response Normalization. - - Parameters - ---------- - inputs : tensor - Must be one of the following types: half, bfloat16, float32. 4-D. - depth_radius : int - Defaults to 5. 0-D. Half-width of the 1-D normalization window. - bias : float - Defaults to 1. An offset (usually positive to avoid dividing by 0). - alpha : float - Defaults to 1. A scale factor, usually positive. - beta : float - Defaults to 0.5. An exponent. - - Returns - ------- - A Tensor. Has the same type as input. - """ - pass - - -def moments(x, axes, shift=None, keepdims=False): - """ - Calculates the mean and variance of x. - - Parameters - ---------- - x : tensor - A Tensor - axes : ints - Axes along which to compute mean and variance. - shift : int - Not used in the current implementation. - keepdims : bool - produce moments with the same dimensionality as the input. - - Returns - ------- - Two Tensor objects: mean and variance. - """ - - pass - - -class MaxPool(object): - - def __init__(self, ksize, strides, padding, data_format=None): - self.data_format, self.padding = preprocess_2d_format(data_format, padding) - self.ksize = ksize - self.strides = strides - - def __call__(self, inputs): - return vision_ops.pool2d( - inputs, - kernel_shape=self.ksize, - strides=self.strides, - padding=self.padding, - mode='MAX', - global_pooling=False, - data_format=self.data_format, - ) - - -def max_pool(input, ksize, strides, padding, data_format=None): - """ - Performs the max pooling on the input. - - Parameters - ---------- - input : tensor - Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] if data_format does not start - with "NC" (default), or [batch_size, num_channels] + input_spatial_shape if data_format starts with "NC". - Pooling happens over the spatial dimensions only. - ksize : int or list of ints - An int or list of ints that has length 1, N or N+2. - The size of the window for each dimension of the input tensor. - strides : int or list of ints - An int or list of ints that has length 1, N or N+2. - The stride of the sliding window for each dimension of the input tensor. - padding : string - 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. - - Returns - ------- - A Tensor of format specified by data_format. The max pooled output tensor. - """ - pass - - -class AvgPool(object): - - def __init__(self, ksize, strides, padding, data_format=None): - self.data_format, self.padding = preprocess_2d_format(data_format, padding) - self.filter_size = ksize - self.strides = strides - - def __call__(self, inputs): - return vision_ops.pool2d( - inputs, - kernel_shape=self.filter_size, - strides=self.strides, - padding=self.padding, - mode='AVG', - global_pooling=False, - data_format=self.data_format, - ) - - -def avg_pool(input, ksize, strides, padding): - """ - Performs the avg pooling on the input. - - Parameters - ---------- - input : tensor - Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] - if data_format does not start with "NC" (default), or [batch_size, num_channels] + input_spatial_shape - if data_format starts with "NC". Pooling happens over the spatial dimensions only. - ksize : int or list of ints - An int or list of ints that has length 1, N or N+2. - The size of the window for each dimension of the input tensor. 
- strides : int or list of ints - An int or list of ints that has length 1, N or N+2. - The stride of the sliding window for each dimension of the input tensor. - padding : string - 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. - - Returns - ------- - A Tensor of format specified by data_format. The average pooled output tensor. - """ - pass - - -def max_pool3d(input, ksize, strides, padding, data_format=None, name=None): - """ - Performs the max pooling on the input. - - Parameters - ---------- - input : tensor - A 5-D Tensor of the format specified by data_format. - ksize : int or list of ints - An int or list of ints that has length 1, 3 or 5. - The size of the window for each dimension of the input tensor. - strides : int or list of ints - An int or list of ints that has length 1, 3 or 5. - The stride of the sliding window for each dimension of the input tensor. - padding : string - 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. - data_format : string - "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data. - With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. - Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. - name : string - A name for the operation (optional). - - Returns - ------- - A Tensor of format specified by data_format. The max pooled output tensor. - """ - pass - - -def avg_pool3d(input, ksize, strides, padding, data_format=None, name=None): - """ - Performs the average pooling on the input. - - Parameters - ---------- - input : tensor - A 5-D Tensor of shape [batch, height, width, channels] and type float32, float64, qint8, quint8, or qint32. - ksize : int or list of ints - An int or list of ints that has length 1, 3 or 5. The size of the window for each dimension of the input tensor. - strides : int or list of ints - An int or list of ints that has length 1, 3 or 5. - The stride of the sliding window for each dimension of the input tensor. - padding : string - 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. - data_format : string - 'NDHWC' and 'NCDHW' are supported. - name : string - Optional name for the operation. - - Returns - ------- - A Tensor with the same type as value. The average pooled output tensor. - """ - pass - - -def pool(input, window_shape, pooling_type, strides=None, padding='VALID', data_format=None, dilations=None, name=None): - """ - Performs an N-D pooling operation. - - Parameters - ---------- - input : tensor - Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] - if data_format does not start with "NC" (default), or [batch_size, num_channels] + input_spatial_shape - if data_format starts with "NC". Pooling happens over the spatial dimensions only. - window_shape : int - Sequence of N ints >= 1. - pooling_type : string - Specifies pooling operation, must be "AVG" or "MAX". - strides : ints - Sequence of N ints >= 1. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1. - padding : string - The padding algorithm, must be "SAME" or "VALID". Defaults to "SAME". - See the "returns" section of tf.ops.convolution for details. 
- data_format : string - Specifies whether the channel dimension of the input and output is the last dimension (default, or if data_format does not start with "NC"), - or the second dimension (if data_format starts with "NC"). - For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". - For N=3, the valid values are "NDHWC" (default) and "NCDHW". - dilations : list of ints - Dilation rate. List of N ints >= 1. Defaults to [1]*N. If any value of dilation_rate is > 1, then all values of strides must be 1. - name : string - Optional. Name of the op. - - Returns - ------- - Tensor of rank N+2, of shape [batch_size] + output_spatial_shape + [num_channels] - """ - pass - - -class DepthwiseConv2d(object): - - def __init__(self, strides, padding, data_format=None, dilations=None, ksize=None, channel_multiplier=1): - self.data_format, self.padding = preprocess_2d_format(data_format, padding) - self.stride = strides - self.dilations = dilations - - def __call__(self, input, filter): - raise NotImplementedError("Not implemented depthwiseconv2d") - - -def depthwise_conv2d(input, filter, strides, padding, data_format=None, dilations=None, name=None): - """ - Depthwise 2-D convolution. - - Parameters - ---------- - input : tensor - 4-D with shape according to data_format. - filter : tensor - 4-D with shape [filter_height, filter_width, in_channels, channel_multiplier]. - strides : list - 1-D of size 4. The stride of the sliding window for each dimension of input. - padding : string - 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. - data_format : string - The data format for input. Either "NHWC" (default) or "NCHW". - dilations : list - 1-D of size 2. The dilation rate in which we sample input values across the height and width dimensions in atrous convolution. - If it is greater than 1, then all values of strides must be 1. - name : string - A name for this operation (optional). - - Returns - ------- - A 4-D Tensor with shape according to data_format. - E.g., for "NHWC" format, shape is [batch, out_height, out_width, in_channels * channel_multiplier]. - """ - - pass - - -def conv1d_transpose( - input, filters, output_shape, strides, padding='SAME', data_format='NWC', dilations=None, name=None -): - """ - The transpose of conv1d. - - Parameters - ---------- - input : tensor - A 3-D Tensor of type float and shape [batch, in_width, in_channels] - for NWC data format or [batch, in_channels, in_width] for NCW data format. - filters : tensor - A 3-D Tensor with the same type as value and shape [filter_width, output_channels, in_channels]. - filter's in_channels dimension must match that of value. - output_shape : tensor - A 1-D Tensor, containing three elements, representing the output shape of the deconvolution op. - strides : list - An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step. - padding : string - 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. - data_format : string - 'NWC' and 'NCW' are supported. - dilations : list - An int or list of ints that has length 1 or 3 which defaults to 1. - The dilation factor for each dimension of input. If set to k > 1, - there will be k-1 skipped cells between each filter element on that dimension. - Dilations in the batch and depth dimensions must be 1. - name : string - Optional name for the returned tensor. 
-
-    Returns
-    -------
-    A Tensor with the same type as value.
-    """
-    pass
-
-
-def conv2d_transpose(
-    input, filters, output_shape, strides, padding='SAME', data_format='NHWC', dilations=None, name=None
-):
-    """
-    The transpose of conv2d.
-
-    Parameters
-    ----------
-    input : tensor
-        A 4-D Tensor of type float and shape [batch, height, width, in_channels]
-        for NHWC data format or [batch, in_channels, height, width] for NCHW data format.
-    filters : tensor
-        A 4-D Tensor with the same type as input and shape [height, width,
-        output_channels, in_channels]. filter's in_channels dimension must match that of input.
-    output_shape : tensor
-        A 1-D Tensor representing the output shape of the deconvolution op.
-    strides : list
-        An int or list of ints that has length 1, 2 or 4. The stride of the sliding window for each dimension of input.
-        If a single value is given it is replicated in the H and W dimension.
-        By default the N and C dimensions are set to 1.
-        The dimension order is determined by the value of data_format, see below for details.
-    padding : string
-        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
-    data_format : string
-        'NHWC' and 'NCHW' are supported.
-    dilations : list
-        An int or list of ints that has length 1, 2 or 4, defaults to 1.
-    name : string
-        Optional name for the returned tensor.
-
-    Returns
-    -------
-    A Tensor with the same type as input.
-    """
-    pass
-
-
-def conv3d_transpose(
-    input, filters, output_shape, strides, padding='SAME', data_format='NDHWC', dilations=None, name=None
-):
-    """
-    The transpose of conv3d.
-
-    Parameters
-    ----------
-    input : tensor
-        A 5-D Tensor of type float and shape [batch, depth, height, width, in_channels] for
-        NDHWC data format or [batch, in_channels, depth, height, width] for NCDHW data format.
-    filters : tensor
-        A 5-D Tensor with the same type as value and shape [depth, height, width, output_channels, in_channels].
-        filter's in_channels dimension must match that of value.
-    output_shape : tensor
-        A 1-D Tensor representing the output shape of the deconvolution op.
-    strides : list
-        An int or list of ints that has length 1, 3 or 5.
-    padding : string
-        'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
-    data_format : string
-        'NDHWC' and 'NCDHW' are supported.
-    dilations : list of ints
-        An int or list of ints that has length 1, 3 or 5, defaults to 1.
-    name : string
-        Optional name for the returned tensor.
-
-    Returns
-    -------
-    A Tensor with the same type as value.
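-
-    Examples
-    --------
-    This op is a stub in the Dragon backend; a sketch of the intended call
-    (hypothetical 5-D tensors ``x`` and ``w``, NDHWC layout):
-
-    >>> # x: [batch, depth, height, width, in_channels]
-    >>> # w: [depth, height, width, output_channels, in_channels]
-    >>> y = conv3d_transpose(x, w, output_shape=[1, 16, 16, 16, 8],
-    ...                      strides=[1, 2, 2, 2, 1], padding='SAME')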
- """ - - pass - - -class BatchNorm(object): - - def __init__(self): - pass - - def __call__(self, *args, **kwargs): - pass diff --git a/tensorlayer/backend/ops/load_backend.py b/tensorlayer/backend/ops/load_backend.py index 4343507a2..e69505215 100644 --- a/tensorlayer/backend/ops/load_backend.py +++ b/tensorlayer/backend/ops/load_backend.py @@ -7,7 +7,6 @@ BACKEND = 'tensorflow' # BACKEND = 'mindspore' -# BACKEND = 'dragon' # Check for backend.json files tl_backend_dir = os.path.expanduser('~') @@ -57,20 +56,13 @@ import mindspore.context as context import os os.environ['DEVICE_ID'] = '0' - #context.set_context(mode=context.PYNATIVE_MODE,device_target='GPU'), - context.set_context(mode=context.GRAPH_MODE, device_target='CPU'), + context.set_context(mode=context.PYNATIVE_MODE,device_target='GPU'), + # context.set_context(mode=context.GRAPH_MODE, device_target='CPU'), # enable_task_sink=True, enable_loop_sink=True) # context.set_context(mode=context.GRAPH_MODE, backend_policy='ms', # device_target='Ascend', enable_task_sink=True, enable_loop_sink=True) sys.stderr.write('Using MindSpore backend.\n') -elif BACKEND == 'dragon': - from .dragon_backend import * - from .dragon_nn import * - import dragon as dg - BACKEND_VERSION = dg.__version__ - sys.stderr.write('Using Dragon backend.\n') - elif BACKEND == 'paddle': from .paddle_backend import * from .paddle_nn import * diff --git a/tensorlayer/backend/ops/mindspore_backend.py b/tensorlayer/backend/ops/mindspore_backend.py index b602a4b8d..4c4d0a2e5 100644 --- a/tensorlayer/backend/ops/mindspore_backend.py +++ b/tensorlayer/backend/ops/mindspore_backend.py @@ -11,12 +11,13 @@ initializer, Constant, Normal, TruncatedNormal, Initializer, _assignment, _calculate_in_and_out, One, Zero ) from mindspore.common.tensor import Tensor -from mindspore._c_expression import Tensor as Tensor_ from mindspore.ops import operations as P from mindspore.ops import functional as F from mindspore.ops import composite as C import mindspore.context as context from mindspore.nn import Cell +from mindspore.ops import count_nonzero +import mindspore.numpy as msnp import numpy as np from scipy.stats import truncnorm @@ -919,7 +920,7 @@ def __init__(self, dtype): self.cast = P.Cast() def construct(self, input): - return self.cast(input, dtype=self.dtype) + return self.cast(input, self.dtype) def cast(x, dtype): @@ -1046,6 +1047,9 @@ def split(value, num_or_size_splits, axis=0, num=None): """ pass +class Floor(Cell): + def __call__(self, *args, **kwargs): + raise NotImplementedError def floor(x): return NotImplementedError @@ -1087,44 +1091,79 @@ def __init__(self, axis=None, epsilon=1e-12): super(L2Normalize, self).__init__() pass - def __call__(self, input, *args, **kwargs): + def construct(self, input, *args, **kwargs): pass class EmbeddingLookup(Cell): - def __init__(self, max_norm=None): + def __init__(self, max_norm=0): + super(EmbeddingLookup, self).__init__() self.max_norm = max_norm + self.embedding_lookup = P.EmbeddingLookup() - def __call__(self, params, ids, *args, **kwargs): - pass + def construct(self, params, ids, *args, **kwargs): + return self.embedding_lookup(params, ids, self.max_norm) -class NCELoss(object): +class NCELoss(Cell): def __init__(self, num_true=1, sampled_values=None, remove_accidental_hits=False): super(NCELoss, self).__init__() - - def __call__(self, weights, biases, labels, inputs, num_sampled, num_classes): pass + def construct(self, weights, biases, labels, inputs, num_sampled, num_classes): + raise NotImplementedError -class 
Not_equal(object):
+
+class NotEqual(Cell):
 
     def __init__(self):
-        pass
+        super(NotEqual, self).__init__()
+        self.not_equal = P.NotEqual()
 
-    def __call__(self, x, y):
-        pass
+    def construct(self, x, y):
+        outputs = self.not_equal(x, y)
+        return outputs
 
 
-class Count_nonzero(object):
+class CountNonzero(object):
 
     def __init__(self, keepdims=None, dtype=int64):
-        pass
+        self.keepdims = keepdims
+        self.dtype = dtype
 
-    def __call__(self, *args, **kwargs):
-        pass
+    def __call__(self, input, axis=None):
+        input = self.convert_dtype(input)
+        return count_nonzero(x=input, axis=axis, keep_dims=self.keepdims, dtype=self.dtype)
+
+    def bool_convert_to_tensor(self, x):
+        # Maps a boolean tensor to a float32 tensor of 0s and 1s.
+        x = x.asnumpy()
+        shapes = x.shape
+        b = np.ones(shapes)
+        if len(shapes) == 1:
+            for i in range(shapes[0]):
+                b[i] = 1 if x[i] else 0
+        if len(shapes) == 2:
+            for i in range(shapes[0]):
+                for j in range(shapes[1]):
+                    b[i][j] = 1 if x[i][j] else 0
+        return Tensor(b, dtype=float32)
+
+    def convert_dtype(self, input):
+        if len(input.shape) == 1 and type(input[0]) is bool:
+            output = self.bool_convert_to_tensor(input)
+        elif len(input.shape) == 2 and type(input[0][0]) is bool:
+            output = self.bool_convert_to_tensor(input)
+        else:
+            output = input
+        return output
 
 
 class Resize(Cell):
@@ -1207,6 +1246,13 @@ def __init__(self):
     def construct(self, x):
         return self.sign(x)
 
+class Ceil(Cell):
+    def __init__(self):
+        super(Ceil, self).__init__()
+        self.ceil = P.Ceil()
+
+    def construct(self, x):
+        return self.ceil(x)
 
 def ceil(x):
     _ceil = P.Ceil()
@@ -1218,7 +1264,7 @@ def multiply(x, y):
 
 
 def divide(x, y):
-    raise NotImplementedError
+    return msnp.divide(x, y)
 
 
 def identity(x):
diff --git a/tensorlayer/backend/ops/mindspore_nn.py b/tensorlayer/backend/ops/mindspore_nn.py
index 6e6619ef5..a2300934c 100644
--- a/tensorlayer/backend/ops/mindspore_nn.py
+++ b/tensorlayer/backend/ops/mindspore_nn.py
@@ -1,19 +1,20 @@
 #! 
/usr/bin/python # -*- coding: utf-8 -*- - from __future__ import absolute_import, division, print_function -from mindspore.nn.cell import Cell -from mindspore import context +import itertools import mindspore as ms import mindspore.ops as P +from mindspore import context +from mindspore.nn.cell import Cell +from mindspore._checkparam import Rel from mindspore.ops import functional as F -from mindspore.communication.management import get_group_size, get_rank from mindspore.communication import management -from mindspore._checkparam import check_int_positive +from mindspore.ops.operations import _inner_ops as inner from mindspore._extends import cell_attr_register from mindspore.ops._grad.grad_base import bprop_getters - +from mindspore._checkparam import Validator as validator +from mindspore.communication.management import get_group_size, get_rank def padding_format(padding): """ @@ -537,25 +538,17 @@ def __init__(self, strides, padding, data_format='NHWC', dilations=None, out_cha if self.data_format is 'NHWC': self.ms_stride = strides[1] self.ms_dilation = dilations[1] - # self.transpose = P.Transpose() elif self.data_format is 'NCHW': self.ms_stride = strides[2] self.ms_dilation = dilations[2] - # print(out_channel, k_size, self.padding, self.ms_stride, self.ms_dilation) self.conv2d = P.Conv2D( out_channel=out_channel, kernel_size=k_size, pad_mode=self.padding, stride=self.ms_stride, - dilation=self.ms_dilation, mode=1, group=1 + dilation=self.ms_dilation, mode=1, group=1, data_format=self.data_format ) def construct(self, inputs, filters): - if self.data_format == 'NHWC': - inputs = nhwc_to_nchw(inputs) - outputs = self.conv2d(inputs, filters) - - if self.data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) return outputs @@ -588,8 +581,28 @@ def conv2d(input, filters, strides, padding, data_format='NCHW', dilations=None) class Conv3D(Cell): - pass - # raise NotImplementedError + def __init__(self, strides, padding, data_format='NDHWC', dilations=None, out_channel=None, k_size=None): + super(Conv3D, self).__init__() + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + + if self.data_format is 'NDHWC': + self.ms_stride = strides[1] + self.ms_dilation = dilations[1] + raise NotImplementedError("The optional value for data format. 
Currently only 'NCDHW' is supported.")
+        elif self.data_format == 'NCDHW':
+            self.ms_stride = strides[2]
+            self.ms_dilation = dilations[2]
+
+        self.conv3d = P.Conv3D(out_channel=out_channel,
+                               kernel_size=k_size,
+                               pad_mode=self.padding,
+                               stride=self.ms_stride,
+                               dilation=self.ms_dilation,
+                               data_format=data_format)
+
+    def construct(self, input, filters):
+        outputs = self.conv3d(input, filters)
+        return outputs
 
 
 def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None, name=None):
@@ -677,23 +690,58 @@ def moments(x, axes, shift=None, keepdims=False):
     pass
 
 
+class MaxPool1d(Cell):
+
+    def __init__(self, ksize, strides, padding, data_format=None):
+        super(MaxPool1d, self).__init__()
+        self.data_format, padding = preprocess_1d_format(data_format=data_format, padding=padding)
+        self.expand = P.ExpandDims()
+        _strides = (1, strides[0])
+        _ksize = (1, ksize[0])
+        if self.data_format == 'NWC':
+            self.squeeze = P.Squeeze(1)
+            _data_format = 'NHWC'
+        if self.data_format == 'NCW':
+            self.squeeze = P.Squeeze(2)
+            _data_format = 'NCHW'
+
+        self.max_pool = P.MaxPool(
+            kernel_size=_ksize,
+            strides=_strides,
+            pad_mode=padding,
+            data_format=_data_format
+        )
+
+    def construct(self, inputs):
+        if self.data_format == 'NWC':
+            x = self.expand(inputs, 1)
+        if self.data_format == 'NCW':
+            x = self.expand(inputs, 2)
+        output = self.max_pool(x)
+        output = self.squeeze(output)
+        return output
+
+
 class MaxPool(Cell):
 
     def __init__(self, ksize, strides, padding, data_format=None):
         super(MaxPool, self).__init__()
-        self.data_format, self.padding = preprocess_2d_format(data_format=data_format, padding=padding)
-        ms_ksize = ksize[1]
-        ms_strides = strides[1]
-        self.maxpool = P.MaxPool(ksize=ms_ksize, strides=ms_strides, padding=self.padding)
+        data_format, padding = preprocess_2d_format(data_format=data_format, padding=padding)
+
+        if data_format == 'NHWC':
+            _strides = (strides[1], strides[2])
+        if data_format == 'NCHW':
+            _strides = (strides[2], strides[3])
+
+        self.maxpool = P.MaxPool(
+            kernel_size=ksize,
+            strides=_strides,
+            pad_mode=padding,
+            data_format=data_format
+        )
 
     def construct(self, inputs):
-        if self.data_format == 'NHWC':
-            inputs = nhwc_to_nchw(inputs)
-
         outputs = self.maxpool(inputs)
-
-        if self.data_format == 'NHWC':
-            outputs = nchw_to_nhwc(outputs)
         return outputs
 
 
@@ -710,7 +758,7 @@ def max_pool(input, ksize, strides, padding, data_format=None):
     ksize : int or list of ints
         An int or list of ints that has length 1, N or N+2.
         The size of the window for each dimension of the input tensor.
-    strides : int or list of ints
+    strides : list of ints
         An int or list of ints that has length 1, N or N+2.
         The stride of the sliding window for each dimension of the input tensor.
padding : string @@ -722,17 +770,61 @@ def max_pool(input, ksize, strides, padding, data_format=None): """ data_format, padding = preprocess_2d_format(data_format=data_format, padding=padding) if data_format == 'NHWC': - input = nhwc_to_nchw(input) - - ms_ksize = ksize[1] - ms_strides = strides[2] - outputs = P.MaxPool(ksize=ms_ksize, strides=ms_strides, padding=padding)(input) - # channel first to channel last - if data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) + _strides = (strides[1], strides[2]) + if data_format == 'NCHW': + _strides = (strides[2], strides[3]) + outputs = P.MaxPool( + kernel_size=ksize, + strides=_strides, + pad_mode=padding, + data_format=data_format + )(input) return outputs + +class AvgPool1d(Cell): + + def __init__(self, ksize, strides, padding, data_format=None): + super(AvgPool1d, self).__init__() + self.data_format, self.padding = preprocess_1d_format(data_format=data_format, padding=padding) + self.kernel_size = (1, ksize[0]) + self.stride = (1, strides[0]) + + if self.data_format == 'NWC': + _data_format = 'NHWC' + self.squeeze = P.Squeeze(1) + if self.data_format == 'NCW': + _data_format = 'NCHW' + self.squeeze = P.Squeeze(2) + + self.avg_pool = P.AvgPool(kernel_size=self.kernel_size, + strides=self.stride, + pad_mode=self.padding, + data_format=_data_format) + self.reduce_mean = P.ReduceMean(keep_dims=True) + self.slice = P.Slice() + self.expand = P.ExpandDims() + self.shape = P.Shape() + + def construct(self, inputs): + x = inputs + batch, channel, width = self.shape(inputs) + if width == self.kernel_size[1]: + x = self.reduce_mean(x, 2) + elif width - self.kernel_size[1] < self.stride[1]: + x = self.slice(x, (0, 0, 0), (batch, channel, self.kernel_size[1])) + x = self.reduce_mean(x, 2) + else: + if self.data_format == 'NCW': + x = self.expand(x, 2) + if self.data_format == 'NWC': + x = self.expand(x, 1) + x = self.avg_pool(x) + x = self.squeeze(x) + return x + + class AvgPool(Cell): def __init__(self, ksize, strides, padding, data_format=None): @@ -740,16 +832,10 @@ def __init__(self, ksize, strides, padding, data_format=None): self.data_format, self.padding = preprocess_2d_format(data_format=data_format, padding=padding) ms_ksize = ksize[1] ms_strides = strides[1] - self.avgpool = P.AvgPool(ksize=ms_ksize, strides=ms_strides, padding=padding) + self.avgpool = P.AvgPool(ksize=ms_ksize, strides=ms_strides, padding=padding, data_format=self.data_format) def construct(self, inputs): - if self.data_format == 'NHWC': - inputs = nhwc_to_nchw(inputs) - outputs = self.avgpool(inputs) - - if self.data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) return outputs @@ -1056,7 +1142,7 @@ def construct(self, x, filters): output = self.conv2d_transpose(x, filters, (n, self.out_channel, h_out, w_out)) if self.data_format == 'NHWC': - output = nchw_to_nhwc(x) + output = nchw_to_nhwc(output) return output @@ -1099,7 +1185,32 @@ def conv2d_transpose( class Conv3d_transpose(Cell): - pass + def __init__(self, strides, padding, data_format='NDHWC', dilations=None, name=None, out_channel=None, k_size=None, + in_channels=None + ): + super(Conv3d_transpose, self).__init__() + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + if self.data_format == 'NDHWC': + self.strides = (strides[1], strides[2], strides[3]) + self.dilations = (dilations[1], dilations[2], dilations[3]) + elif self.data_format == 'NCDHW': + self.strides = (strides[2], strides[3], strides[4]) + self.dilations = (dilations[2], dilations[3], dilations[4]) + + 
self.conv3d_transpose = P.Conv3DTranspose( + in_channel=in_channels, + out_channel=out_channel, + kernel_size=k_size, + mode=1, + pad_mode=padding, + stride=self.strides, + dilation=self.dilations, + data_format=self.data_format) + + def construct(self, input, filters): + output = self.conv3d_transpose(input, filters) + return output + def conv3d_transpose( @@ -1141,18 +1252,32 @@ class BatchNorm(Cell): """Batch Normalization base class.""" @cell_attr_register - def __init__( - self, num_features, epsilon=1e-5, decay=0.9, gamma=None, beta=None, moving_mean=None, moving_var=None, - is_train=None, device_num_each_group=1, data_format='channels_last' - ): + def __init__(self, + num_features, + epsilon=1e-5, + decay=0.9, + gamma=None, + beta = None, + moving_mean = None, + moving_var = None, + is_train = None, + device_num_each_group=1, + process_groups=0, + data_format='NCHW'): super(BatchNorm, self).__init__() + if data_format in ["channels_last", "NHWC", "nhwc"]: + data_format = "NHWC" + elif data_format in ["channels_first", "NCHW", "nchw"]: + data_format = "NCHW" + validator.check_value_type('num_features', num_features, [int], self.cls_name) if num_features < 1: raise ValueError("num_features must be at least 1") if decay < 0 or decay > 1: raise ValueError("momentum should be a number in range [0, 1], but got {}".format(decay)) - - self.data_format = data_format + self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.cls_name) + if context.get_context("device_target") != "GPU" and self.format == "NHWC": + raise ValueError("NHWC format only support in GPU target.") self.use_batch_statistics = is_train self.num_features = num_features self.eps = epsilon @@ -1160,19 +1285,47 @@ def __init__( self.moving_variance = moving_var self.gamma = gamma self.beta = beta - self.group = check_int_positive(device_num_each_group) + self.group_device_num = validator.check_positive_int(device_num_each_group) + self.process_groups = process_groups self.is_global = False - if self.group != 1: + self.parallel_mode = context.get_auto_parallel_context("parallel_mode") + global SYNC_BN_GROUP_NAME + # for GlobalBatchNorm + if self.group_device_num != 1: self.rank_id = get_rank() self.rank_size = get_group_size() self.device_list = [i for i in range(0, self.rank_size)] - self.rank_list = self.list_group(self.device_list, self.group) + self.rank_list = self.list_group(self.device_list, self.group_device_num) self.rank_list_idx = len(self.rank_list) for i in range(self.rank_list_idx): - if self.rank_id in self.rank_list[i] and self.group != 1: + if self.rank_id in self.rank_list[i]: self.is_global = True - management.create_group('group' + str(i), self.rank_list[i]) - self.all_reduce = P.AllReduce(P.ReduceOp.SUM, 'group' + str(i)).add_prim_attr('fusion', 1) + if SYNC_BN_GROUP_NAME == "": + SYNC_BN_GROUP_NAME = "sync_bn_group"+ str(i) + management.create_group(SYNC_BN_GROUP_NAME, self.rank_list[i]) + # for SyncBatchNorm + if self.process_groups != 0: + self.rank_id = get_rank() + self.rank_size = get_group_size() + if self.process_groups is not None: + validator.check_isinstance("process_groups", self.process_groups, list) + self._check_rank_ids(self.process_groups, self.rank_size) + for i in range(len(self.process_groups)): + validator.check_isinstance("process_groups[" + str(i) +"]", self.process_groups[i], list) + self.group_device_num = len(self.process_groups[i]) + if self.rank_id in self.process_groups[i] and self.group_device_num > 1: + self.is_global = True + if SYNC_BN_GROUP_NAME 
== "": + SYNC_BN_GROUP_NAME = "sync_bn_group" + str(i) + management.create_group(SYNC_BN_GROUP_NAME, self.process_groups[i]) + elif self.rank_size > 1: + self.is_global = True + self.group_device_num = self.rank_size + self.device_list = [i for i in range(0, self.rank_size)] + if SYNC_BN_GROUP_NAME == "": + SYNC_BN_GROUP_NAME = "sync_bn_group0" + management.create_group(SYNC_BN_GROUP_NAME, self.device_list) + self.shape = P.Shape() self.reduce_mean = P.ReduceMean(keep_dims=True) self.square = P.Square() @@ -1180,8 +1333,7 @@ def __init__( self.cast = P.Cast() self.dtype = P.DType() self.reshape = P.Reshape() - self.is_ascend = context.get_context("device_target") == "Ascend" - self.is_gpu = context.get_context("device_target") == "GPU" + self._target = context.get_context("device_target") self.is_graph_mode = context.get_context("mode") == context.GRAPH_MODE self.momentum = 1.0 - decay if context.get_context("enable_ge"): @@ -1189,19 +1341,20 @@ def __init__( else: self.is_ge_backend = False - if self.is_graph_mode and (self.is_ge_backend or self.is_ascend): - self.bn_train = P.BatchNorm(is_training=True, epsilon=self.eps) - elif self.is_gpu: - self.bn_train = P.FusedBatchNormEx(mode=1, epsilon=self.eps, momentum=self.momentum) - else: - self.bn_train = P.FusedBatchNorm(mode=1, epsilon=self.eps, momentum=self.momentum) - self.bn_infer = P.BatchNorm(is_training=False, epsilon=self.eps) - self.enable_global_sync = self.is_global and (self.is_ge_backend or (self.is_graph_mode and self.is_ascend)) - self.enable_default_train = self.is_graph_mode and not self.is_global and \ - (self.is_ge_backend or self.is_ascend) - - data_parallel_strategy = ((1, ), (1, )) - data_parallel_strategy_one = ((1, ), ()) + self.bn_train = P.BatchNorm(is_training=True, + epsilon=self.eps, + momentum=self.momentum, + data_format=self.format) + if self.is_global: + self.bn_train = inner.SyncBatchNorm(epsilon=self.eps, + momentum=self.momentum, + group=SYNC_BN_GROUP_NAME, + device_num=self.group_device_num) + + self.bn_infer = P.BatchNorm(is_training=False, epsilon=self.eps, data_format=self.format) + + data_parallel_strategy = ((1,), (1,)) + data_parallel_strategy_one = ((1,), ()) self.sub_mean = P.Sub().shard(data_parallel_strategy) self.sub_var = P.Sub().shard(data_parallel_strategy) self.mul_mean = P.Mul().shard(data_parallel_strategy_one) @@ -1209,116 +1362,54 @@ def __init__( self.assign_sub_mean = P.AssignSub().shard(data_parallel_strategy) self.assign_sub_var = P.AssignSub().shard(data_parallel_strategy) - def _check_data_dim(self, x): - raise NotImplementedError - def list_group(self, world_rank, group_size): if group_size > get_group_size(): - raise ValueError( - "group size can not be greater than local rank size, group size is {}, " - "local_rank_size is {}".format(group_size, get_group_size()) - ) + raise ValueError("group size can not be greater than local rank size, group size is {}, " + "local_rank_size is {}".format(group_size, get_group_size())) if len(world_rank) % group_size != 0: raise ValueError("please make your group size correct.") - world_rank_list = zip(*(iter(world_rank), ) * group_size) + world_rank_list = zip(*(iter(world_rank),) * group_size) group_list = [list(i) for i in world_rank_list] return group_list - def _global_sync(self, x, axes, re_shape): - """calculate global batch normalization output""" - x_mean = self.reduce_mean(x, axes) - x_mean_square = self.reduce_mean(self.square(x), axes) - global_batch_mean = self.all_reduce(x_mean) / self.group - global_batch_mean_square = 
self.all_reduce(x_mean_square) / self.group - global_mean = global_batch_mean - global_var = global_batch_mean_square - self.square(global_mean) - var_sqrt = self.sqrt(global_var + self.eps) - mean_first = (x - global_mean) / var_sqrt - y = mean_first * self.reshape(self.gamma, re_shape) + self.reshape(self.beta, re_shape) - - mean_sub = self.sub_mean(self.reshape(self.moving_mean, re_shape), global_mean) - tmp_mean = self.mul_mean(mean_sub, self.cast(self.momentum, self.dtype(mean_sub))) - mean_sub2 = self.sub_var(self.reshape(self.moving_mean, re_shape), global_var) - tmp_variance = self.mul_var(mean_sub2, self.cast(self.momentum, self.dtype(mean_sub2))) - y = F.depend(y, self.assign_sub_mean(self.moving_mean, self.reshape(tmp_mean, self.shape(self.moving_mean)))) - y = F.depend( - y, self.assign_sub_var(self.moving_variance, self.reshape(tmp_variance, self.shape(self.moving_variance))) - ) - return y - - def get_dim(self, input): - dim = len(self.shape(input)) - if dim == 2: - return '1d' - elif dim == 4: - return '2d' - else: - raise ValueError("The input must has 2 dims or 4 dims.") - - def _shape_check_bn(self, in_shape, in_dims): - dim = len(in_shape) - if in_dims == '1d' and dim != 2: - raise ValueError("The input must has 2 dims.") - if in_dims == '2d' and dim != 4: - raise ValueError("The input must has 4 dims.") - if in_dims == 'both' and dim != 2 and dim != 4: - raise ValueError("The input must has 2 dims or 4 dims.") - - def _shape_infer(self, x_shape, num_feature): - """global batch normalization shape and axes infer""" - if len(x_shape) == 4: - axes = (0, 2, 3) - re_shape = (1, num_feature, 1, 1) - else: - axes = (0, ) - re_shape = (1, num_feature) - return axes, re_shape + def _check_rank_ids(self, process_groups, rank_size): + seen = set() + for rid in itertools.chain(*process_groups): + validator.check_int_range(rid, 0, rank_size, Rel.INC_LEFT, "rank id in process_groups") + if rid in seen: + raise ValueError("rank id in process_groups should not be duplicated.") + seen.add(rid) def construct(self, inputs): - x = inputs - self._shape_check_bn(self.shape(x), self.get_dim(x)) - if self.use_batch_statistics is None: - flag = self.training - else: - flag = self.use_batch_statistics + x_shape = F.shape(inputs) + if len(x_shape) == 5: + inputs = self.reshape(inputs, (x_shape[0], x_shape[1], x_shape[2] * x_shape[3], x_shape[4])) + + flag = self.use_batch_statistics if flag: - if self.enable_global_sync: - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - x = nhwc_to_nchw(x) - axes, re_shape = self._shape_infer(F.shape(x), self.num_features) - y = self._global_sync(x, axes, re_shape) - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - y = nchw_to_nhwc(y) - return y - - if self.enable_default_train: - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - x = nhwc_to_nchw(x) - y, batch_mean, batch_var, _, _ = self.bn_train(x, self.gamma, self.beta, None, None) - - mean_sub = self.sub_mean(self.moving_mean, batch_mean) - temp_mean = self.mul_mean(mean_sub, self.momentum) - mean_sub2 = self.sub_var(self.moving_variance, batch_var) - temp_variance = self.mul_var(mean_sub2, self.momentum) - y = F.depend(y, self.assign_sub_mean(self.moving_mean, temp_mean)) - y = F.depend(y, self.assign_sub_var(self.moving_variance, temp_variance)) - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - y = nchw_to_nhwc(y) - return y - - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - x = nhwc_to_nchw(x) - y = 
self.bn_train(x, self.gamma, self.beta, self.moving_mean, self.moving_variance)[0] - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - y = nchw_to_nhwc(y) - return y - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - x = nhwc_to_nchw(x) - y = self.bn_infer(x, self.gamma, self.beta, self.moving_mean, self.moving_variance)[0] - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - y = nchw_to_nhwc(y) - return y + output = self.bn_train(inputs, + self.gamma, + self.beta, + self.moving_mean, + self.moving_variance)[0] + + if len(x_shape) == 5: + output = self.reshape(output, x_shape) + return output + + output = self.bn_infer(inputs, + self.gamma, + self.beta, + self.moving_mean, + self.moving_variance)[0] + if len(x_shape) == 5: + output = self.reshape(output, x_shape) + return output + + def extend_repr(self): + return 'num_features={}, eps={}, momentum={}, gamma={}, beta={}, moving_mean={}, moving_variance={}'.format( + self.num_features, self.eps, self.momentum, self.gamma, self.beta, self.moving_mean, self.moving_variance) class GroupConv2D(Cell): @@ -1337,17 +1428,11 @@ def __init__(self, strides, padding, data_format, dilations, out_channel, k_size self.conv2d = P.Conv2D( out_channel=out_channel, kernel_size=k_size, pad_mode=self.padding, stride=self.ms_stride, - dilation=self.ms_dilation, mode=1, group=groups + dilation=self.ms_dilation, mode=1, group=groups, data_format=self.data_format ) def construct(self, inputs, filters): - if self.data_format == 'NHWC': - inputs = nhwc_to_nchw(inputs) - outputs = self.conv2d(inputs, filters) - - if self.data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) return outputs @@ -1407,30 +1492,23 @@ def __init__(self, strides, padding, data_format, dilations, out_channel, k_size if self.data_format is 'NHWC': self.ms_stride = strides[1] self.ms_dilation = dilations[1] - # self.transpose = P.Transpose() elif self.data_format is 'NCHW': self.ms_stride = strides[2] self.ms_dilation = dilations[2] self.depthwise_conv = P.Conv2D( out_channel=self.in_channel * self.depth_multiplier, kernel_size=self.k_size, pad_mode=self.padding, - stride=self.ms_stride, dilation=self.ms_dilation, mode=1, group=self.in_channel + stride=self.ms_stride, dilation=self.ms_dilation, mode=1, group=self.in_channel, data_format=self.data_format ) self.pointwise_conv = P.Conv2D( out_channel=self.out_channel, kernel_size=(1, 1), pad_mode=self.padding, stride=(1, 1), dilation=(1, 1), - mode=1, group=1 + mode=1, group=1, data_format=self.data_format ) def construct(self, x, depthwise_filters, pointwise_filters): - if self.data_format == 'NHWC': - x = nhwc_to_nchw(x) - outputs = self.depthwise_conv(x, depthwise_filters) outputs = self.pointwise_conv(outputs, pointwise_filters) - - if self.data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) return outputs @@ -1545,10 +1623,8 @@ def construct(self, inputs): kernel_h = h - (out_h - 1) * stride_h stride_w = w // out_w kernel_w = w - (out_w - 1) * stride_w - outputs = P.MaxPool(kernel_size=(kernel_h, kernel_w), strides=(stride_h, stride_w), pad_mode='VALID')(inputs) - - if self.data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) + outputs = P.MaxPool(kernel_size=(kernel_h, kernel_w), strides=(stride_h, stride_w), + pad_mode='VALID', data_format=self.data_format)(inputs) return outputs @@ -1566,14 +1642,13 @@ def __init__(self, strides, padding, data_format, dilations, out_channel, k_size if self.data_format is 'NHWC': self.ms_stride = strides[1] self.ms_dilation =
dilations[1] - # self.transpose = P.Transpose() elif self.data_format is 'NCHW': self.ms_stride = strides[2] self.ms_dilation = dilations[2] self.conv2d = P.Conv2D( out_channel=out_channel, kernel_size=k_size, pad_mode=self.padding, stride=self.ms_stride, - dilation=self.ms_dilation, mode=1, group=1 + dilation=self.ms_dilation, mode=1, group=1, data_format=self.data_format ) @bprop_getters.register(P.Sign) @@ -1590,16 +1665,9 @@ def bprop(x, out, dout): def construct(self, inputs, filters): - if self.data_format == 'NHWC': - inputs = nhwc_to_nchw(inputs) - filters = self.sign(filters) - outputs = self.conv2d(inputs, filters) - if self.data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) - return outputs diff --git a/tensorlayer/backend/ops/paddle_backend.py b/tensorlayer/backend/ops/paddle_backend.py index f7334c0bc..69044b0ec 100644 --- a/tensorlayer/backend/ops/paddle_backend.py +++ b/tensorlayer/backend/ops/paddle_backend.py @@ -816,6 +816,9 @@ def split(value, num_or_size_splits, axis=0, num=None): """ pass +class Floor(object): + def __call__(self, *args, **kwargs): + raise NotImplementedError def floor(x): raise NotImplementedError @@ -875,7 +878,7 @@ def __call__(self, weights, biases, labels, inputs, num_sampled, num_classes): pass -class Not_equal(object): +class NotEqual(object): def __init__(self): pass @@ -884,7 +887,7 @@ def __call__(self, x, y): pass -class Count_nonzero(object): +class CountNonzero(object): def __init__(self, keepdims=None, dtype="int64"): pass @@ -950,6 +953,10 @@ def __call__(self, x): raise NotImplementedError +class Ceil(object): + def __call__(self, *args, **kwargs): + raise NotImplementedError + def ceil(x): raise NotImplementedError diff --git a/tensorlayer/backend/ops/paddle_nn.py b/tensorlayer/backend/ops/paddle_nn.py index 535b9faaa..40d0f1a55 100644 --- a/tensorlayer/backend/ops/paddle_nn.py +++ b/tensorlayer/backend/ops/paddle_nn.py @@ -557,6 +557,17 @@ def moments(x, axes, shift=None, keepdims=False): pass +class MaxPool1d(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_1d_format(data_format=data_format, padding=padding) + self.ksize = ksize + self.strides = strides + + def __call__(self, inputs): + raise NotImplementedError + + class MaxPool(object): def __init__(self, ksize, strides, padding, data_format=None): @@ -594,6 +605,16 @@ def max_pool(input, ksize, strides, padding, data_format=None): pass +class AvgPool1d(object): + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_1d_format(data_format=data_format, padding=padding) + self.ksize = ksize + self.strides = strides + + def __call__(self, inputs): + raise NotImplementedError + + class AvgPool(object): def __init__(self, ksize, strides, padding, data_format=None): diff --git a/tensorlayer/backend/ops/tensorflow_backend.py b/tensorlayer/backend/ops/tensorflow_backend.py index 9d9a00fdd..f58632f24 100644 --- a/tensorlayer/backend/ops/tensorflow_backend.py +++ b/tensorlayer/backend/ops/tensorflow_backend.py @@ -845,6 +845,10 @@ def split(value, num_or_size_splits, axis=0, num=None): return tf.split(value=value, num_or_size_splits=num_or_size_splits, axis=axis, num=num) +class Floor(object): + def __call__(self, x): + return tf.floor(x) + def floor(x): return tf.floor(x) @@ -917,7 +921,7 @@ def __call__(self, weights, biases, labels, inputs, num_sampled, num_classes): return outputs -class Not_equal(object): +class NotEqual(object): def __init__(self): pass 
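Each element-wise op in the backend now comes in two spellings: a plain function (`floor`, `ceil`, `not_equal`) and a callable wrapper class (`Floor`, `Ceil`, `NotEqual`) that layer code can instantiate once and reuse without committing to a backend. A minimal sketch of the intended call pattern, assuming the TensorFlow backend is active and these wrappers are re-exported through `tl.ops` (which the `DeformableConv2d` change later in this patch relies on):

>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> floor_op = tl.ops.Floor()                 # stateless wrapper around tf.floor
>>> floor_op(tf.constant([1.7, -0.3]))        # -> [ 1., -1.]
>>> tl.ops.Ceil()(tf.constant([1.2]))         # -> [2.]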
@@ -926,7 +930,7 @@ def __call__(self, x, y): return tf.not_equal(x, y) -class Count_nonzero(object): +class CountNonzero(object): def __init__(self, keepdims=None, dtype=int64): self.keepdims = keepdims @@ -997,6 +1001,11 @@ def __call__(self, x): return tf.sign(x) +class Ceil(object): + def __call__(self, x): + return tf.math.ceil(x) + + def ceil(x): return tf.math.ceil(x) diff --git a/tensorlayer/backend/ops/tensorflow_nn.py b/tensorlayer/backend/ops/tensorflow_nn.py index 5cefda342..e0d33c49d 100644 --- a/tensorlayer/backend/ops/tensorflow_nn.py +++ b/tensorlayer/backend/ops/tensorflow_nn.py @@ -652,6 +652,20 @@ def moments(x, axes, shift=None, keepdims=False): return outputs +class MaxPool1d(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_1d_format(data_format=data_format, padding=padding) + self.ksize = ksize + self.strides = strides + + def __call__(self, inputs): + outputs = tf.nn.max_pool( + input=inputs, ksize=self.ksize, strides=self.strides, padding=self.padding, data_format=self.data_format + ) + return outputs + + class MaxPool(object): def __init__(self, ksize, strides, padding, data_format=None): @@ -711,6 +725,24 @@ def max_pool(input, ksize, strides, padding, data_format=None): return outputs +class AvgPool1d(object): + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_1d_format(data_format=data_format, padding=padding) + self.ksize = ksize + self.strides = strides + + def __call__(self, inputs): + outputs = tf.nn.pool( + input=inputs, + window_shape=self.ksize, + pooling_type="AVG", + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + ) + return outputs + + class AvgPool(object): def __init__(self, ksize, strides, padding, data_format=None): diff --git a/tensorlayer/cost/__init__.py b/tensorlayer/cost/__init__.py index 3ca7c2c81..9a7cbdd89 100644 --- a/tensorlayer/cost/__init__.py +++ b/tensorlayer/cost/__init__.py @@ -7,8 +7,6 @@ from .tensorflow_cost import * elif BACKEND == 'mindspore': from .mindspore_cost import * -elif BACKEND == 'dragon': - pass elif BACKEND == 'paddle': from .paddle_cost import * else: diff --git a/tensorlayer/files/utils.py b/tensorlayer/files/utils.py index d05a0c3b0..5c8805ac7 100644 --- a/tensorlayer/files/utils.py +++ b/tensorlayer/files/utils.py @@ -37,6 +37,8 @@ from mindspore.nn import Cell from mindspore import Tensor import mindspore as ms +if tl.BACKEND == 'paddle': + import paddle as pd if sys.version_info[0] == 2: from urllib import urlretrieve @@ -74,6 +76,7 @@ 'ms_variables_to_numpy', 'assign_tf_variable', 'assign_ms_variable', + 'assign_pd_variable', 'save_weights_to_hdf5', 'load_hdf5_to_weights_in_order', 'load_hdf5_to_weights', @@ -2098,6 +2101,8 @@ def save_npz_dict(save_list=None, name='model.npz'): save_list_var = tf_variables_to_numpy(save_list) elif tl.BACKEND == 'mindspore': save_list_var = ms_variables_to_numpy(save_list) + elif tl.BACKEND == 'paddle': + save_list_var = pd_variables_to_numpy(save_list) else: raise NotImplementedError('Not implemented') save_var_dict = {save_list_names[idx]: val for idx, val in enumerate(save_list_var)} @@ -2148,6 +2153,11 @@ def load_and_assign_npz_dict(name='model.npz', network=None, skip=False): elif tl.BACKEND == 'mindspore': assign_param = Tensor(weights[key], dtype=ms.float32) assign_ms_variable(network.all_weights[net_weights_name.index(key)], assign_param) + elif tl.BACKEND == 'paddle': + 
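# Paddle parameters are written in place via paddle.assign (see assign_pd_variable below) +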
assign_pd_variable(network.all_weights[net_weights_name.index(key)], weights[key]) + else: + raise NotImplementedError('Not implemented') + logging.info("[*] Model restored from npz_dict %s" % name) @@ -2593,6 +2603,15 @@ def ms_variables_to_numpy(variables): results = [v.data.asnumpy() for v in var_list] return results +def pd_variables_to_numpy(variables): + if not isinstance(variables, list): + var_list = [variables] + else: + var_list = variables + + results = [v.numpy() for v in var_list] + return results + def assign_tf_variable(variable, value): """Assign value to a TF variable""" @@ -2615,6 +2634,10 @@ def construct(self, x): Assign()(variable, value) +def assign_pd_variable(variable, value): + pd.assign(value, variable) + + def _save_weights_to_hdf5_group(f, layers): """ Save layer/model weights into hdf5 group recursively. diff --git a/tensorlayer/initializers/load_initializers_backend.py b/tensorlayer/initializers/load_initializers_backend.py index fc65bab8d..3f5492da4 100644 --- a/tensorlayer/initializers/load_initializers_backend.py +++ b/tensorlayer/initializers/load_initializers_backend.py @@ -7,9 +7,7 @@ if BACKEND == 'tensorflow': from .tensorflow_initializers import * elif BACKEND == 'mindspore': - from .tensorflow_initializers import * -elif BACKEND == 'dragon': - from .tensorflow_initializers import * + from .mindspore_initializers import * elif BACKEND == 'paddle': from .paddle_initializers import * else: diff --git a/tensorlayer/initializers/mindspore_initializers.py b/tensorlayer/initializers/mindspore_initializers.py new file mode 100644 index 000000000..52e0809ed --- /dev/null +++ b/tensorlayer/initializers/mindspore_initializers.py @@ -0,0 +1,256 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import numpy as np +import tensorlayer as tl +from mindspore import Tensor +from mindspore.common import initializer + +__all__ = [ + 'Initializer', 'Zeros', 'Ones', 'Constant', 'RandomUniform', 'RandomNormal', 'TruncatedNormal', + 'deconv2d_bilinear_upsampling_initializer', 'HeNormal' +] + + +class Initializer(object): + """Initializer base class: all initializers inherit from this class. + """ + + def __call__(self, shape, dtype=None): + """Returns a tensor object initialized as specified by the initializer. + + Parameters + ---------- + shape : tuple of int. + The shape of the tensor. + dtype : Optional dtype of the tensor. + If not provided will return tensor of `tl.float32`. + + Returns + ------- + + """ + raise NotImplementedError + + def get_config(self): + """Returns the configuration of the initializer as a JSON-serializable dict. + + Returns + ------- + A JSON-serializable Python dict. + """ + return {} + + @classmethod + def from_config(cls, config): + """Instantiates an initializer from a configuration dictionary. + + Parameters + ---------- + config : A python dictionary. + It will typically be the output of `get_config`. + + Returns + ------- + An Initializer instance. + """ + if 'dtype' in config: + config.pop('dtype') + return cls(**config) + + +class Zeros(Initializer): + """Initializer that generates tensors initialized to 0. + """ + def __init__(self): + self.zero = initializer.Zero() + + def __call__(self, shape, dtype=tl.float32): + arr = np.ndarray(shape) + self.zero(arr) + return Tensor(arr, dtype=dtype) + + +class Ones(Initializer): + """Initializer that generates tensors initialized to 1. 
+ """ + def __init__(self): + self.one = initializer.One() + + def __call__(self, shape, dtype=tl.float32): + arr = np.ndarray(shape) + self.one(arr) + return Tensor(arr, dtype=dtype) + + + class Constant(Initializer): + """Initializer that generates tensors initialized to a constant value. + + Parameters + ---------- + value : A python scalar or a numpy array. + The assigned value. + + """ + + def __init__(self, value=0): + self.value = value + self.constant = initializer.Constant(value=value) + + def __call__(self, shape, dtype=tl.float32): + arr = np.ndarray(shape) + self.constant(arr) + return Tensor(arr, dtype=dtype) + + def get_config(self): + return {"value": self.value} + + + class RandomUniform(Initializer): + """Initializer that generates tensors with a uniform distribution. + + Parameters + ---------- + minval : A python scalar or a scalar tensor. + Lower bound of the range of random values to generate. + maxval : A python scalar or a scalar tensor. + Upper bound of the range of random values to generate. + seed : A Python integer. + Used to seed the random generator. + + """ + + def __init__(self, minval=-0.05, maxval=0.05, seed=None): + self.minval = minval + self.maxval = maxval + self.seed = seed + + def __call__(self, shape, dtype=tl.float32): + return tl.random_uniform(shape, self.minval, self.maxval, dtype=dtype, seed=self.seed) + + def get_config(self): + return {"minval": self.minval, "maxval": self.maxval, "seed": self.seed} + + + class RandomNormal(Initializer): + """Initializer that generates tensors with a normal distribution. + + Parameters + ---------- + mean : A python scalar or a scalar tensor. + Mean of the random values to generate. + stddev : A python scalar or a scalar tensor. + Standard deviation of the random values to generate. + seed : A Python integer. + Used to seed the random generator. + """ + + def __init__(self, mean=0.0, stddev=0.05, seed=None): + self.mean = mean + self.stddev = stddev + self.seed = seed + + def __call__(self, shape, dtype=tl.float32): + return tl.random_normal(shape, self.mean, self.stddev, dtype=dtype, seed=self.seed) + + def get_config(self): + return {"mean": self.mean, "stddev": self.stddev, "seed": self.seed} + + + class TruncatedNormal(Initializer): + """Initializer that generates a truncated normal distribution. + + These values are similar to values from a `RandomNormal` + except that values more than two standard deviations from the mean + are discarded and re-drawn. This is the recommended initializer for + neural network weights and filters. + + + Parameters + ---------- + mean : A python scalar or a scalar tensor. + Mean of the random values to generate. + stddev : A python scalar or a scalar tensor. + Standard deviation of the random values to generate. + seed : A Python integer. + Used to seed the random generator. + """ + + def __init__(self, mean=0.0, stddev=0.05, seed=None): + self.mean = mean + self.stddev = stddev + self.seed = seed + + def __call__(self, shape, dtype=tl.float32): + return tl.truncated_normal(shape, self.mean, self.stddev, dtype=dtype, seed=self.seed) + + def get_config(self): + return {"mean": self.mean, "stddev": self.stddev, "seed": self.seed}
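These wrappers all follow the same call contract: construct with the distribution parameters, then call with a shape to get a backend tensor. A minimal sketch of round-tripping one through its config, assuming the MindSpore backend is active so that `tl.initializers` re-exports this module (as the `load_initializers_backend.py` change above arranges):

>>> import tensorlayer as tl
>>> init = tl.initializers.TruncatedNormal(mean=0.0, stddev=0.02, seed=42)
>>> w = init(shape=(5, 5, 3, 32), dtype=tl.float32)   # MindSpore Tensor of weights
>>> cfg = init.get_config()                           # {'mean': 0.0, 'stddev': 0.02, 'seed': 42}
>>> same = tl.initializers.TruncatedNormal.from_config(cfg)   # rebuild an equivalent initializer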
+ + + class HeNormal(Initializer): + """He normal initializer. + + Parameters + ---------- + seed : A Python integer. + Used to seed the random generator. + + """ + + def __init__(self, seed=None): + self.seed = seed + + def __call__(self, shape, dtype=tl.float32): + return tl.he_normal(seed=self.seed, shape=shape, dtype=dtype) + + def get_config(self): + return {"seed": self.seed} + + + def deconv2d_bilinear_upsampling_initializer(shape): + """Returns the initializer that can be passed to DeConv2dLayer for initializing the + weights in correspondence to channel-wise bilinear up-sampling. + Used in segmentation approaches such as [FCN](https://arxiv.org/abs/1605.06211) + + Parameters + ---------- + shape : tuple of int + The shape of the filters, [height, width, output_channels, in_channels]. + It must match the shape passed to DeConv2dLayer. + + Returns + ------- + ``Constant`` + A constant initializer with weights set to correspond to per channel bilinear upsampling + when passed as W_init in DeConv2dLayer + + """ + if shape[0] != shape[1]: + raise Exception('deconv2d_bilinear_upsampling_initializer only supports symmetrical filter sizes') + + if shape[3] < shape[2]: + raise Exception( + 'deconv2d_bilinear_upsampling_initializer behaviour is not defined for num_in_channels < num_out_channels ' + ) + + filter_size = shape[0] + num_out_channels = shape[2] + num_in_channels = shape[3] + + # Create bilinear filter kernel as numpy array + bilinear_kernel = np.zeros([filter_size, filter_size], dtype=np.float32) + scale_factor = (filter_size + 1) // 2 + if filter_size % 2 == 1: + center = scale_factor - 1 + else: + center = scale_factor - 0.5 + for x in range(filter_size): + for y in range(filter_size): + bilinear_kernel[x, y] = (1 - abs(x - center) / scale_factor) * (1 - abs(y - center) / scale_factor) + weights = np.zeros((filter_size, filter_size, num_out_channels, num_in_channels), dtype=np.float32) + for i in range(num_out_channels): + weights[:, :, i, i] = bilinear_kernel + + # assign numpy array to constant_initializer and pass to get_variable + return Constant(value=weights)
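For instance, to start a deconvolution layer from exact bilinear up-sampling (a sketch; the 8x8 filter and 3-channel shapes are illustrative only, and note the shape order [height, width, output_channels, in_channels] with output_channels <= in_channels):

>>> import tensorlayer as tl
>>> bilinear_init = tl.initializers.deconv2d_bilinear_upsampling_initializer(shape=(8, 8, 3, 3))
>>> # pass bilinear_init as W_init of the deconv layer so each channel starts as bilinear interpolation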
diff --git a/tensorlayer/layers/convolution/binary_conv.py b/tensorlayer/layers/convolution/binary_conv.py index e5ab6c5a4..5fa9b541e 100644 --- a/tensorlayer/layers/convolution/binary_conv.py +++ b/tensorlayer/layers/convolution/binary_conv.py @@ -49,8 +49,7 @@ class BinaryConv2d(Module): >>> net = tl.layers.Input([8, 100, 100, 32], name='input') >>> binaryconv2d = tl.layers.BinaryConv2d( - ... n_filter=64, filter_size=(3, 3), strides=(2, 2), act=tl.relu, in_channels=32, name='binaryconv2d' - ... )(net) + ... n_filter=64, filter_size=(3, 3), strides=(2, 2), act=tl.ReLU, in_channels=32, name='binaryconv2d')(net) >>> print(binaryconv2d) >>> output shape : (8, 50, 50, 64) diff --git a/tensorlayer/layers/convolution/deformable_conv.py b/tensorlayer/layers/convolution/deformable_conv.py index 8a2cba09e..1e8920850 100644 --- a/tensorlayer/layers/convolution/deformable_conv.py +++ b/tensorlayer/layers/convolution/deformable_conv.py @@ -9,14 +9,12 @@ 'DeformableConv2d', ] - class DeformableConv2d(Module): """The :class:`DeformableConv2d` class is a 2D `Deformable Convolutional Networks `__. - Parameters ---------- - offset_layer : tf.Tensor + offset_layer : tl.Tensor To predict the offset of convolution operations. The shape is (batchsize, input height, input width, 2*(number of element in the convolution kernel)) e.g. if apply a 3*3 kernel, the number of the last dimension should be 18 (2*3*3) @@ -36,11 +34,9 @@ class DeformableConv2d(Module): The number of in channels. name : str A unique layer name. - Examples -------- With TensorLayer - >>> net = tl.layers.Input([5, 10, 10, 16], name='input') >>> offset1 = tl.layers.Conv2d( ... n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', name='offset1' @@ -54,21 +50,20 @@ class DeformableConv2d(Module): >>> deformconv2 = tl.layers.DeformableConv2d( ... offset_layer=offset2, n_filter=64, filter_size=(3, 3), name='deformable2' ... )(deformconv1) - References ---------- - The deformation operation was adapted from the implementation in `here `__ - Notes ----- - The padding is fixed to 'SAME'. - The current implementation is not optimized for memory usage. Please use it carefully. - """ + # @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, offset_layer=None, + # shape=(3, 3, 1, 100), n_filter=32, filter_size=(3, 3), act=None, @@ -76,7 +71,7 @@ def __init__( W_init=tl.initializers.truncated_normal(stddev=0.02), b_init=tl.initializers.constant(value=0.0), in_channels=None, - name=None + name=None # 'deformable_conv_2d', ): super().__init__(name, act=act) @@ -88,17 +83,10 @@ def __init__( self.b_init = b_init self.in_channels = in_channels - # layer forward state - self._forward_state = False - self.kernel_n = filter_size[0] * filter_size[1] if self.offset_layer.get_shape()[-1] != 2 * self.kernel_n: raise AssertionError("offset.get_shape()[-1] is not equal to: %d" % 2 * self.kernel_n) - if self.in_channels is not None: - self.build(None) - self._built = True - logging.info( "DeformableConv2d %s: n_filter: %d, filter_size: %s act: %s" % ( self.name, self.n_filter, str(self.filter_size @@ -122,13 +110,14 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape): - if self.in_channels is None: - self.in_channels = inputs_shape[-1] + + self.in_channels = inputs_shape[-1] self.input_h = int(inputs_shape[1]) self.input_w = int(inputs_shape[2]) - initial_offsets = tl.ops.stack(tl.ops.meshgrid(tl.ops.range(self.filter_size[0]), - tl.ops.range(self.filter_size[1]), indexing='ij')) # initial_offsets --> (kh, kw, 2) + initial_offsets = tl.ops.stack( + tl.ops.meshgrid(tl.ops.range(self.filter_size[0]), tl.ops.range(self.filter_size[1]), indexing='ij') + ) # initial_offsets --> (kh, kw, 2) initial_offsets = tl.ops.reshape(initial_offsets, (-1, 2)) # initial_offsets --> (n, 2) initial_offsets = tl.ops.expand_dims(initial_offsets, 0) # initial_offsets --> (1, n, 2) initial_offsets = tl.ops.expand_dims(initial_offsets, 0) # initial_offsets --> (1, 1, n, 2) @@ -168,6 +157,7 @@ def forward(self, inputs): self._built = True self._forward_state = True + # shape = (filter_size[0], filter_size[1], pre_channel, n_filter) offset = self.offset_layer grid_offset = self.grid_offset @@ -219,21 +209,17 @@ def _tf_repeat(self, a, repeats): def _tf_batch_map_coordinates(self, inputs, coords): """Batch version of tf_map_coordinates - Only supports 2D feature maps - Parameters ---------- - inputs : ``tf.Tensor`` + inputs : ``tl.Tensor`` shape = (b*c, h, w) - coords : ``tf.Tensor`` + coords : ``tl.Tensor`` shape = (b*c, h, w, n, 2) - Returns ------- - ``tf.Tensor`` + ``tl.Tensor`` A Tensor with the shape as (b*c, h, w, n) - """ inputs_shape = inputs.get_shape() coords_shape = coords.get_shape() @@ -243,8 +229,8 @@ def _tf_batch_map_coordinates(self, inputs, coords): kernel_n = int(coords_shape[3]) n_coords = input_h * input_w * kernel_n - coords_lt = tl.ops.cast(tl.ops.floor(coords), 'int32') - coords_rb =
tl.ops.cast(tl.ops.ceil(coords), 'int32') + coords_lt = tl.ops.cast(tl.ops.Floor()(coords), 'int32') + coords_rb = tl.ops.cast(tl.ops.Ceil()(coords), 'int32') coords_lb = tl.ops.stack([coords_lt[:, :, :, :, 0], coords_rb[:, :, :, :, 1]], axis=-1) coords_rt = tl.ops.stack([coords_rb[:, :, :, :, 0], coords_lt[:, :, :, :, 1]], axis=-1) @@ -265,21 +251,18 @@ def _tf_batch_map_coordinates(self, inputs, coords): def _tf_batch_map_offsets(self, inputs, offsets, grid_offset): """Batch map offsets into input - Parameters ------------ - inputs : ``tf.Tensor`` + inputs : ``tl.Tensor`` shape = (b, h, w, c) - offsets: ``tf.Tensor`` + offsets: ``tl.Tensor`` shape = (b, h, w, 2*n) - grid_offset: `tf.Tensor`` + grid_offset: `tl.Tensor`` Offset grids shape = (h, w, n, 2) - Returns ------- - ``tf.Tensor`` + ``tl.Tensor`` A Tensor with the shape as (b, h, w, c) - """ inputs_shape = inputs.get_shape() batch_size = tl.get_tensor_shape(inputs)[0] @@ -293,8 +276,6 @@ def _tf_batch_map_offsets(self, inputs, offsets, grid_offset): # offsets (b, h, w, 2*n) --> (b, h, w, n, 2) offsets = tl.ops.reshape(offsets, (batch_size, input_h, input_w, kernel_n, 2)) - # offsets (b, h, w, n, 2) --> (b*c, h, w, n, 2) - # offsets = tf.tile(offsets, [channel, 1, 1, 1, 1]) coords = tl.ops.expand_dims(grid_offset, 0) # grid_offset --> (1, h, w, n, 2) coords = tl.ops.tile(coords, [batch_size, 1, 1, 1, 1]) + offsets # grid_offset --> (b, h, w, n, 2) @@ -314,11 +295,3 @@ def _tf_batch_map_offsets(self, inputs, offsets, grid_offset): return mapped_vals -if __name__ == '__main__': - net = tl.layers.Input([5, 10, 10, 16], name='input') - offset1 = tl.layers.Conv2d(n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', name='offset1', in_channels=16)(net) - deformconv1 = DeformableConv2d(offset_layer=offset1, n_filter=32, filter_size=(3, 3), name='deformable1')(net) - offset2 = tl.layers.Conv2d(n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', name='offset2', in_channels=32)(deformconv1) - deformconv2 = DeformableConv2d(offset_layer=offset2, n_filter=64, filter_size=(3, 3), name='deformable2')(deformconv1) - print(deformconv2) - diff --git a/tensorlayer/layers/convolution/depthwise_conv.py b/tensorlayer/layers/convolution/depthwise_conv.py index bac18dec7..e84e0d062 100644 --- a/tensorlayer/layers/convolution/depthwise_conv.py +++ b/tensorlayer/layers/convolution/depthwise_conv.py @@ -138,7 +138,7 @@ def build(self, inputs_shape): if BACKEND == 'mindspore': self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, 1) - self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) + self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init, transposed=True) self.depthwise_conv2d = tl.ops.DepthwiseConv2d( strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate, diff --git a/tensorlayer/layers/convolution/dorefa_conv.py b/tensorlayer/layers/convolution/dorefa_conv.py index 50396cd7e..de82b50c3 100644 --- a/tensorlayer/layers/convolution/dorefa_conv.py +++ b/tensorlayer/layers/convolution/dorefa_conv.py @@ -52,7 +52,7 @@ class DorefaConv2d(Module): >>> net = tl.layers.Input([8, 12, 12, 32], name='input') >>> dorefaconv2d = tl.layers.DorefaConv2d( - ... n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tl.relu, padding='SAME', name='dorefaconv2d' + ... n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tl.ReLU, padding='SAME', name='dorefaconv2d' ... 
)(net) >>> print(dorefaconv2d) >>> output shape : (8, 12, 12, 32) diff --git a/tensorlayer/layers/convolution/quan_conv.py b/tensorlayer/layers/convolution/quan_conv.py index 87f4f5256..7a46c3d77 100644 --- a/tensorlayer/layers/convolution/quan_conv.py +++ b/tensorlayer/layers/convolution/quan_conv.py @@ -55,7 +55,7 @@ class QuanConv2d(Module): >>> net = tl.layers.Input([8, 12, 12, 64], name='input') >>> quanconv2d = tl.layers.QuanConv2d( - ... n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='quancnn2d' + ... n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tl.ReLU, padding='SAME', name='quancnn2d' ... )(net) >>> print(quanconv2d) >>> output shape : (8, 12, 12, 32) diff --git a/tensorlayer/layers/convolution/separable_conv.py b/tensorlayer/layers/convolution/separable_conv.py index b837e4ed7..73390f1ea 100644 --- a/tensorlayer/layers/convolution/separable_conv.py +++ b/tensorlayer/layers/convolution/separable_conv.py @@ -47,9 +47,10 @@ class SeparableConv1d(Module): -------- With TensorLayer >>> net = tl.layers.Input([8, 50, 64], name='input') - >>> separableconv1d = tl.layers.SeparableConv1d(n_filter=32, filter_size=3, strides=2, padding='SAME', act=tf.nn.relu, name='separable_1d')(net) + >>> separableconv1d = tl.layers.SeparableConv1d(n_filter=32, filter_size=3, strides=2, padding='SAME', act=tl.ReLU, name='separable_1d')(net) >>> print(separableconv1d) >>> output shape : (8, 25, 32) + """ def __init__( @@ -112,10 +113,10 @@ def build(self, inputs_shape): if BACKEND == 'tensorflow': self.depthwise_filter_shape = (self.filter_size, self.in_channels, self.depth_multiplier) - self.pointwise_filter_shape = (1, self.depth_multiplier * self.in_channels, self.n_filter) elif BACKEND == 'mindspore': self.depthwise_filter_shape = (self.filter_size, 1, self.depth_multiplier * self.in_channels) - self.pointwise_filter_shape = (1, self.depth_multiplier * self.in_channels, self.n_filter) + + self.pointwise_filter_shape = (1, self.depth_multiplier * self.in_channels, self.n_filter) self.depthwise_W = self._get_weights( 'depthwise_filters', shape=self.depthwise_filter_shape, init=self.depthwise_init @@ -191,7 +192,7 @@ class SeparableConv2d(Module): -------- With TensorLayer >>> net = tl.layers.Input([8, 50, 50, 64], name='input') - >>> separableconv2d = tl.layers.SeparableConv2d(n_filter=32, filter_size=3, strides=2, depth_multiplier = 3 , padding='SAME', act=tf.nn.relu, name='separable_2d')(net) + >>> separableconv2d = tl.layers.SeparableConv2d(n_filter=32, filter_size=3, strides=2, depth_multiplier = 3 , padding='SAME', act=tl.ReLU, name='separable_2d')(net) >>> print(separableconv2d) >>> output shape : (8, 24, 24, 32) """ @@ -308,12 +309,3 @@ def forward(self, inputs): outputs = self.act(outputs) return outputs - -if __name__ == '__main__': - net = tl.layers.Input([5, 400, 400, 3], name='input') - layer = SeparableConv2d( - in_channels=3, filter_size=(3, 3), strides=(2, 2), dilation_rate=(2, 2), act=tl.ReLU, depth_multiplier=3, - name='separableconv2d1' - ) - print(len(layer.all_weights)) - print(layer(net).shape) diff --git a/tensorlayer/layers/convolution/simplified_conv.py b/tensorlayer/layers/convolution/simplified_conv.py index e78dfbd8a..5af052262 100644 --- a/tensorlayer/layers/convolution/simplified_conv.py +++ b/tensorlayer/layers/convolution/simplified_conv.py @@ -51,7 +51,7 @@ class Conv1d(Module): >>> net = tl.layers.Input([8, 100, 1], name='input') >>> conv1d = tl.layers.Conv1d(n_filter=32, filter_size=5, stride=2, b_init=None, in_channels=1, 
name='conv1d_1') >>> print(conv1d) - >>> tensor = tl.layers.Conv1d(n_filter=32, filter_size=5, stride=2, act=tl.ops.relu, name='conv1d_2')(net) + >>> tensor = tl.layers.Conv1d(n_filter=32, filter_size=5, stride=2, act=tl.ReLU, name='conv1d_2')(net) >>> print(tensor) """ @@ -192,7 +192,7 @@ class Conv2d(Module): >>> net = tl.layers.Input([8, 3, 400, 400], name='input') >>> conv2d = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), b_init=None, in_channels=3, name='conv2d_1') >>> print(conv2d) - >>> tensor = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tl.ops.relu, name='conv2d_2')(net) + >>> tensor = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tl.ReLU, name='conv2d_2')(net) >>> print(tensor) """ @@ -337,7 +337,7 @@ class Conv3d(Module): >>> net = tl.layers.Input([8, 20, 20, 20, 3], name='input') >>> conv3d = tl.layers.Conv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), b_init=None, in_channels=3, name='conv3d_1') >>> print(conv3d) - >>> tensor = tl.layers.Conv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), act=tl.ops.relu, name='conv3d_2')(net) + >>> tensor = tl.layers.Conv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), act=tl.ReLU, name='conv3d_2')(net) >>> print(tensor) """ @@ -427,7 +427,7 @@ def build(self, inputs_shape): self.conv3d = tl.ops.Conv3D( strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate, - out_channel=self.n_filter, k_size=(self.filter_size[0], self.filter_size[1]) + out_channel=self.n_filter, k_size=(self.filter_size[0], self.filter_size[1], self.filter_size[2]) ) self.act_init_flag = False @@ -486,7 +486,7 @@ class DeConv1d(Module): >>> net = tl.layers.Input([8, 100, 1], name='input') >>> conv1d = tl.layers.DeConv1d(n_filter=32, filter_size=5, stride=2, b_init=None, in_channels=1, name='Deonv1d_1') >>> print(conv1d) - >>> tensor = tl.layers.DeConv1d(n_filter=32, filter_size=5, stride=2, act=tl.ops.relu, name='Deconv1d_2')(net) + >>> tensor = tl.layers.DeConv1d(n_filter=32, filter_size=5, stride=2, act=tl.ReLU, name='Deconv1d_2')(net) >>> print(tensor) """ @@ -634,7 +634,7 @@ class DeConv2d(Module): >>> net = tl.layers.Input([8, 3, 400, 400], name='input') >>> conv2d_transpose = tl.layers.DeConv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), b_init=None, in_channels=3, name='conv2d_transpose_1') >>> print(conv2d_transpose) - >>> tensor = tl.layers.DeConv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tl.ops.relu, name='conv2d_transpose_2')(net) + >>> tensor = tl.layers.DeConv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tl.ReLU, name='conv2d_transpose_2')(net) >>> print(tensor) """ @@ -709,7 +709,7 @@ def build(self, inputs_shape): #TODO channels first filter shape [out_channel, in_channel, filter_h, filter_w] self.filter_shape = (self.filter_size[0], self.filter_size[1], self.n_filter, self.in_channels) - self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) + self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init, transposed=True) self.b_init_flag = False if self.b_init: @@ -781,7 +781,7 @@ class DeConv3d(Module): >>> net = tl.layers.Input([8, 20, 20, 20, 3], name='input') >>> deconv3d = tl.layers.DeConv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), b_init=None, in_channels=3, name='deconv3d_1') >>> print(deconv3d) - >>> tensor = tl.layers.DeConv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), act=tl.ops.relu, 
name='deconv3d_2')(net) + >>> tensor = tl.layers.DeConv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), act=tl.ReLU, name='deconv3d_2')(net) >>> print(tensor) """ @@ -858,7 +858,7 @@ def build(self, inputs_shape): self.filter_size[0], self.filter_size[1], self.filter_size[2], self.n_filter, self.in_channels ) - self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) + self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init, transposed=True) if self.b_init: self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) @@ -871,7 +871,8 @@ def build(self, inputs_shape): self.conv3d_transpose = tl.ops.Conv3d_transpose( strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate, - out_channel=self.n_filter, k_size=(self.filter_size[0], self.filter_size[1], self.filter_size[2]) + out_channel=self.n_filter, k_size=(self.filter_size[0], self.filter_size[1], self.filter_size[2]), + in_channels=self.in_channels ) self.act_init_flag = False diff --git a/tensorlayer/layers/convolution/ternary_conv.py b/tensorlayer/layers/convolution/ternary_conv.py index 5b60ae052..b8ebfd4f4 100644 --- a/tensorlayer/layers/convolution/ternary_conv.py +++ b/tensorlayer/layers/convolution/ternary_conv.py @@ -50,7 +50,7 @@ class TernaryConv2d(Module): >>> net = tl.layers.Input([8, 12, 12, 32], name='input') >>> ternaryconv2d = tl.layers.TernaryConv2d( - ... n_filter=64, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='ternaryconv2d' + ... n_filter=64, filter_size=(5, 5), strides=(1, 1), act=tl.ReLU, padding='SAME', name='ternaryconv2d' ... )(net) >>> print(ternaryconv2d) >>> output shape : (8, 12, 12, 64) diff --git a/tensorlayer/layers/core/common.py b/tensorlayer/layers/core/common.py index 1af257f12..a0ace214d 100644 --- a/tensorlayer/layers/core/common.py +++ b/tensorlayer/layers/core/common.py @@ -37,44 +37,45 @@ def str2act(act): raise Exception("Unsupported act: {}".format(act)) return _act_dict[act] -def _save_weights(self, file_path, format=None): +def _save_weights(net, file_path, format=None): """Input file_path, save model weights into a file of given format. - Use self.load_weights() to restore. - - Parameters - ---------- - file_path : str - Filename to which the model weights will be saved. - format : str or None - Saved file format. - Value should be None, 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now. - 1) If this is set to None, then the postfix of file_path will be used to decide saved format. - If the postfix is not in ['h5', 'hdf5', 'npz', 'ckpt'], then file will be saved in hdf5 format by default. - 2) 'hdf5' will save model weights name in a list and each layer has its weights stored in a group of - the hdf5 file. - 3) 'npz' will save model weights sequentially into a npz file. - 4) 'npz_dict' will save model weights along with its name as a dict into a npz file. - 5) 'ckpt' will save model weights into a tensorflow ckpt file. - - Default None. - - Examples - -------- - 1) Save model weights in hdf5 format by default. - >>> net = vgg16() - >>> net.save_weights('./model.h5') - ... - >>> net.load_weights('./model.h5') - - 2) Save model weights in npz/npz_dict format - >>> net = vgg16() - >>> net.save_weights('./model.npz') - >>> net.save_weights('./model.npz', format='npz_dict') - - """ - - # self.all_weights = self.network.all_weights - if self.all_weights is None or len(self.all_weights) == 0: + Use net.load_weights() to restore. 
+ + Parameters + ---------- + file_path : str + Filename to which the model weights will be saved. + format : str or None + Saved file format. + Value should be None, 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now. + 1) If this is set to None, then the postfix of file_path will be used to decide saved format. + If the postfix is not in ['h5', 'hdf5', 'npz', 'ckpt'], then file will be saved in hdf5 format by default. + 2) 'hdf5' will save model weights name in a list and each layer has its weights stored in a group of + the hdf5 file. + 3) 'npz' will save model weights sequentially into a npz file. + 4) 'npz_dict' will save model weights along with its name as a dict into a npz file. + 5) 'ckpt' will save model weights into a tensorflow ckpt file. + + Default None. + + Examples + -------- + 1) Save model weights in hdf5 format by default. + >>> net = vgg16() + >>> optimizer = tl.optimizers.Adam(learning_rate=0.001) + >>> metric = tl.metric.Accuracy() + >>> model = tl.models.Model(network=net, loss_fn=tl.cost.cross_entropy, optimizer=optimizer, metrics=metric) + >>> model.save_weights('./model.h5') + ... + >>> model.load_weights('./model.h5') + + 2) Save model weights in npz/npz_dict format + >>> model.save_weights('./model.npz') + >>> model.save_weights('./model.npz', format='npz_dict') + + """ + + if net.all_weights is None or len(net.all_weights) == 0: logging.warning("Model contains no weights or layers haven't been built, nothing will be saved") return @@ -86,11 +87,11 @@ def _save_weights(self, file_path, format=None): format = 'hdf5' if format == 'hdf5' or format == 'h5': - utils.save_weights_to_hdf5(file_path, self) + utils.save_weights_to_hdf5(file_path, net) elif format == 'npz': - utils.save_npz(self.all_weights, file_path) + utils.save_npz(net.all_weights, file_path) elif format == 'npz_dict': - utils.save_npz_dict(self.all_weights, file_path) + utils.save_npz_dict(net.all_weights, file_path) elif format == 'ckpt': # TODO: enable this when tf save ckpt is enabled raise NotImplementedError("ckpt load/save is not supported now.") @@ -100,8 +101,8 @@ def _save_weights(self, file_path, format=None): "Other format is not supported now." ) -def _load_weights(self, file_path, format=None, in_order=True, skip=False): - """Load model weights from a given file, which should be previously saved by self.save_weights(). +def _load_weights(net, file_path, format=None, in_order=True, skip=False): + """Load model weights from a given file, which should be previously saved by net.save_weights(). Parameters ---------- @@ -110,7 +111,7 @@ def _load_weights(self, file_path, format=None, in_order=True, skip=False): format : str or None If not specified (None), the postfix of the file_path will be used to decide its format. If specified, value should be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now. - In addition, it should be the same format when you saved the file using self.save_weights(). + In addition, it should be the same format when you saved the file using net.save_weights(). Default is None. in_order : bool Allow loading weights into model in a sequential way or by name. Only useful when 'format' is 'hdf5'. @@ -122,7 +123,7 @@ def _load_weights(self, file_path, format=None, in_order=True, skip=False): skip : bool Allow skipping weights whose name is mismatched between the file and model. Only useful when 'format' is 'hdf5' or 'npz_dict'. 
If 'skip' is True, 'in_order' argument will be ignored and those loaded weights - whose name is not found in model weights (self.all_weights) will be skipped. If 'skip' is False, error will + whose name is not found in model weights (net.all_weights) will be skipped. If 'skip' is False, error will occur when mismatch is found. Default is False. @@ -130,14 +131,17 @@ def _load_weights(self, file_path, format=None, in_order=True, skip=False): -------- 1) load model from a hdf5 file. >>> net = vgg16() - >>> net.load_weights('./model_graph.h5', in_order=False, skip=True) # load weights by name, skipping mismatch - >>> net.load_weights('./model_eager.h5') # load sequentially + >>> optimizer = tl.optimizers.Adam(learning_rate=0.001) + >>> metric = tl.metric.Accuracy() + >>> model = tl.models.Model(network=net, loss_fn=tl.cost.cross_entropy, optimizer=optimizer, metrics=metric) + >>> model.load_weights('./model_graph.h5', in_order=False, skip=True) # load weights by name, skipping mismatch + >>> model.load_weights('./model_eager.h5') # load sequentially 2) load model from a npz file - >>> net.load_weights('./model.npz') + >>> model.load_weights('./model.npz') - 2) load model from a npz file, which is saved as npz_dict previously - >>> net.load_weights('./model.npz', format='npz_dict') + 3) load model from a npz file, which is saved as npz_dict previously + >>> model.load_weights('./model.npz', format='npz_dict') Notes ------- @@ -156,14 +160,14 @@ def _load_weights(self, file_path, format=None, in_order=True, skip=False): if format == 'hdf5' or format == 'h5': if skip ==True or in_order == False: # load by weights name - utils.load_hdf5_to_weights(file_path, self, skip) + utils.load_hdf5_to_weights(file_path, net, skip) else: # load in order - utils.load_hdf5_to_weights_in_order(file_path, self) + utils.load_hdf5_to_weights_in_order(file_path, net) elif format == 'npz': - utils.load_and_assign_npz(file_path, self) + utils.load_and_assign_npz(file_path, net) elif format == 'npz_dict': - utils.load_and_assign_npz_dict(file_path, self, skip) + utils.load_and_assign_npz_dict(file_path, net, skip) elif format == 'ckpt': # TODO: enable this when tf save ckpt is enabled raise NotImplementedError("ckpt load/save is not supported now.") diff --git a/tensorlayer/layers/core/core_dragon.py b/tensorlayer/layers/core/core_dragon.py deleted file mode 100644 index f07772c2c..000000000 --- a/tensorlayer/layers/core/core_dragon.py +++ /dev/null @@ -1,765 +0,0 @@ -#! 
/usr/bin/python -# -*- coding: utf-8 -*- -#TODO Dragon Module needs a better implementation - -import time -import dragon as dg -import tensorlayer as tl -from tensorlayer.layers.utils import (get_variable_with_initializer) -from .common import str2act, _save_weights, _load_weights -from collections import OrderedDict -from tensorlayer import logging - -__all__ = ['Module', 'SequentialLayer', 'LayerList'] - -_global_layer_name_dict = {} -Parameter_ = dg.Tensor - -class Module(object): - - def __init__(self, name=None, act=None, *args, **kwargs): - self._params = OrderedDict() - self._layers = OrderedDict() - self._params_status = OrderedDict() - self._parameter_layout_dict = {} - self._create_time = int(time.time() * 1e9) - - global _global_layer_name_dict - if name is None: - prefix = self.__class__.__name__.lower() - - if _global_layer_name_dict.get(prefix) is not None: - _global_layer_name_dict[prefix] += 1 - name = prefix + '_' + str(_global_layer_name_dict[prefix]) - else: - _global_layer_name_dict[prefix] = 0 - name = prefix - while True: - if _global_layer_name_dict.get(name) is None: - break - _global_layer_name_dict[prefix] += 1 - name = prefix + '_' + str(_global_layer_name_dict[prefix]) - else: - if _global_layer_name_dict.get(name) is not None: - pass - else: - _global_layer_name_dict[name] = 0 - - self.name = name - - if isinstance(act, str): - str_act = str2act(act) - - if act: - if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or len(act) > 10 and act[0:10] == "leaky_relu"): - self.act = str_act - elif isinstance(act, str): - self.act = str_act() - else: - self.act = act() - else: - self.act = act - - # Layer building state - self._built = False - - # Layer nodes state - self._nodes = [] - self._nodes_fixed = False - - # Layer weight state - self._all_weights = [] - self._trainable_weights = [] - self._nontrainable_weights = [] - - # layer forward state - self._forward_state = False - - # Layer training state - self.is_train = True - - def extend_repr(self): - """ - Sets the extended representation of the Module. - - To print customized extended information, re-implement this method in your own Layers. - """ - return '' - - def __repr__(self): - extra_str = self.extend_repr() - info_str = self.__class__.__name__ + '<' - if self._layers: - sub_str = '\n' - if extra_str: - sub_str += '{}\n'.format(self.extend_repr()) - for key, value in self._layers.items(): - sub_str += '({}): {}\n'.format(key, repr(value)) - sub_str = sub_str.replace('\n', '\n ') + '>' - info_str += sub_str - else: - info_str += extra_str + '>' - return info_str - - def __setattr__(self, name, value): - layers = self.__dict__.get('_layers') - params = self.__dict__.get('_params') - - if isinstance(value, Parameter_): - if params is None: - raise AttributeError("Can not assign params before Module.__init__() call.") - if name in self.__dict__: - if self.__dict__[name] is not None: - raise TypeError("Expected type is not in (Parameter, Module), but got Parameter.") - del self.__dict__[name] - if layers and name in layers: - raise TypeError("Expected type is Module, but got Parameter.") - self.insert_param_to_layer(name, value) - - elif isinstance(value, Module): - if layers is None: - raise AttributeError("Can not assign layers before Module.__init__() call.") - if name in self.__dict__: - del self.__dict__[name] - if params and name in params: - raise TypeError("Expected type is Parameter, but got Module.") - # TODO How to prompt the user, enter the in_channels. 
- # TODO Automatic shape inference when the user does not enter inchannels. - # if value._built is False: - # raise AttributeError( - # "The registered layer `{}` should be built in advance. " - # "Do you forget to pass the keyword argument 'in_channels'? ".format(value.name) - # ) - layers[name] = value - else: - object.__setattr__(self, name, value) - - def __call__(self, inputs, *args, **kwargs): - - output = self.forward(inputs, *args, **kwargs) - - return output - - def forward(self, *inputs, **kwargs): - raise Exception("The forward method must be implemented by inherited class") - - def build(self, inputs_shape): - raise Exception("The build(self, inputs_shape) method must be implemented by inherited class") - - def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), trainable=True): - """ Get trainable variables. """ - weight = get_variable_with_initializer( - scope_name=self.name, var_name=var_name, shape=shape, init=init, trainable=trainable - ) - self.trainable = trainable - return weight - - def save_weights(self, file_path, format=None): - """Input file_path, save model weights into a file of given format.""" - _save_weights(self, file_path, format) - - def load_weights(self, file_path, format=None, in_order=True, skip=False): - """Load model weights from a given file, which should be previously saved by self.save_weights().""" - _load_weights(self, file_path, format, in_order, skip) - - def _set_mode_for_layers(self, is_train): - """Set all layers of this network to a given mode. - - Parameters - ---------- - is_train : boolean - Network's mode. True means training mode while False means evaluation mode. - - """ - layers = self.layers_and_names(name_prefix='') - for layer_name, layer in layers: - if isinstance(layer, Module): - layer.is_train = is_train - - - def set_train(self): - """Set this network in training mode. After calling this method, - all layers in network are in training mode, in particular, BatchNorm, Dropout, etc. - TODO It is not possible to modify the parameter state after initialization, and a better way needs to be found. - Examples - -------- - >>> import tensorlayer as tl - >>> net = tl.vgg16() - >>> net.set_train() - - """ - if self.is_train !=True: - self.is_train = True - self._set_mode_for_layers(True) - - def set_eval(self): - """Set this network in evaluation mode. After calling this method, - all layers in network are in evaluation mode, in particular, BatchNorm, Dropout, etc. - TODO It is not possible to modify the parameter state after initialization, and a better way needs to be found. - Examples - -------- - >>> import tensorlayer as tl - >>> net = tl.vgg16() - >>> net.eval() - # do evaluation - - """ - if self.is_train != False: - self.is_train = False - self._set_mode_for_layers(False) - - def test(self): - """Set this network in evaluation mode.""" - self.eval() - - def infer(self): - """Set this network in evaluation mode.""" - self.eval() - - @staticmethod - def _compute_shape(tensors): - if isinstance(tensors, list): - shape_mem = [tl.get_tensor_shape(t) for t in tensors] - else: - shape_mem = tl.get_tensor_shape(tensors) - return shape_mem - - def insert_param_to_layer(self, param_name, param, check_name=True): - """ - Adds a parameter to the current layer. - - Inserts a parameter with given name to the layer. Please refer to the usage in - source code of `tensorlayer.layer.Module.__setattr__`. - - Args: - param_name (str): Name of the parameter. - param (Parameter): Parameter to be inserted to the layer. 
- check_name (bool): Determines whether the name input is compatible. Default: True. - - Raises: - KeyError: If the name of parameter is null or contains dot. - AttributeError: If user did not call init() first. - TypeError: If the type of parameter is not Parameter_. - """ - if not param_name: - raise KeyError("The name of parameter should not be null.") - if check_name and '.' in param_name: - raise KeyError("The name of parameter should not contain \".\"") - if '_params' not in self.__dict__: - raise AttributeError("You need call init() first.") - if hasattr(self, param_name) and param_name not in self._params: - raise KeyError("Duplicated parameter name '{}'.".format(param_name)) - if not isinstance(param, Parameter_) and param is not None: - raise TypeError("The type of parameter should be 'Parameter' if not None.") - self._params[param_name] = param - try: - self._params_status[param_name] = self.trainable - except: - pass - - def _add_node(self, input_tensors, output_tensors): - """Add a LayerNode for this layer given input_tensors, output_tensors. - - WARINING: This function should not be called from outside, it should only be called - in layer.__call__ when building static model. - - Parameters - ---------- - input_tensors : Tensor or a list of tensors - Input tensors to this layer. - output_tensors : Tensor or a list of tensors - Output tensors to this layer. - - """ - raise NotImplementedError - - @property - def create_time(self): - return self._create_time - - def __getattr__(self, name): - if '_params' in self.__dict__: - params = self.__dict__['_params'] - if name in params: - return params[name] - if '_layers' in self.__dict__: - layers = self.__dict__['_layers'] - if name in layers: - return layers[name] - if '_params_status' in self.__dict__: - params_status = self.__dict__['_params_status'] - if name in params_status: - return params_status[name] - raise AttributeError("'{}' object has no attribute '{}'.".format(type(self).__name__, name)) - - def __delattr__(self, name): - if name in self._params: - del self._params[name] - elif name in self._layers: - del self._layers[name] - else: - object.__delattr__(self, name) - - @property - def trainable_weights(self): - """ - Returns all trainable weights. - - Returns a list of all trainable parmeters. - - Args: - recurse (bool): Whether contains the trainable weights of sublayers. Default: True. - - Returns: - List, the list of trainable weights. - """ - self.get_weights() - layers = self.layers_and_names(name_prefix='') - for layer_name, layer in layers: - params = layer._params.items() - params_status = layer._params_status.items() - params_zip = zip(params, params_status) - for params, params_status in params_zip: - if params_status[1] ==True: - self._trainable_weights.append(params[1]) - return self._trainable_weights - - @property - def nontrainable_weights(self): - """ - Returns all untrainable weights. - - Returns a list of all untrainable weights. - - Args: - recurse (bool): Whether contains the untrainable weights of sublayers. Default: True. - - Returns: - List, the list of untrainable weights. 
- """ - layers = self.layers_and_names(name_prefix='') - for layer_name, layer in layers: - params = layer._params.items() - params_status = layer._params_status.items() - params_zip = zip(params, params_status) - for params, params_status in params_zip: - if params_status[1] == False: - self._nontrainable_weights.append(params[1]) - return self._nontrainable_weights - - @property - def all_weights(self): - layers = self.layers_and_names(name_prefix='') - for layer_name, layer in layers: - params = layer._params.items() - for par, val in params: - self._all_weights.append(val) - return self._all_weights - - def get_weights(self, expand=True): - """ - Returns an iterator over layer weights. - - Yields weights of this layer. If `expand` is True, yield parameters of this layer and all sublayers. - - Args: - expand (bool): If True, yields parameters of this layer and all sublayers. Otherwise, yields only parameters - that are direct members of this layer. Default: True. - - Examples: - >>> net = Net() - >>> for item in net.get_weights(): - >>> print(item) - """ - for _, param in self.parameters_and_names(expand=expand): - yield param - - def check_names(self): - names = set("") - for value, param in self.parameters_and_names(): - if param.name in names: - raise ValueError( - "The value of {} is {}, its name '{}' already exists.".format(value, param, param.name) - ) - names.add(param.name) - - def insert_child_to_layer(self, child_name, child): - """ - Adds a child layer to the current layer. - - Args: - child_name (str): Name of the child layer. - child (Module): The child layer to be inserted. - - Raises: - KeyError: Child Module's name is incorrect or duplicated with the other child name. - TypeError: Child Module's type is incorrect. - """ - if not child_name or '.' in child_name: - raise KeyError("Child layer name is incorrect.") - if hasattr(self, child_name) and child_name not in self._layers: - raise KeyError("Duplicate child name '{}'.".format(child_name)) - if not isinstance(child, Module) and child is not None: - raise TypeError("Child layer type is incorrect.") - self._layers[child_name] = child - - def parameters_and_names(self, name_prefix='', expand=True): - """ - Returns an iterator over layer parameters. - - Includes the parameter's name and itself. - - Args: - name_prefix (str): Namespace. Default: ''. - expand (bool): If True, yields parameters of this layer and all sublayers. Otherwise, yields only parameters - that are direct members of this layer. Default: True. - - Examples: - >>> n = Net() - >>> names = [] - >>> for m in n.parameters_and_names(): - >>> if m[0]: - >>> names.append(m[0]) - """ - layers = [] - if expand: - layers = self.layers_and_names(name_prefix=name_prefix) - else: - layers.append((name_prefix, self)) - - params_set = set() - for layer_name, layer in layers: - params = layer._params.items() - for par_name, par in params: - if par.inited_param is not None: - par = par.inited_param - if par is not None and id(par) not in params_set: - params_set.add(id(par)) - par_new_name = par_name - if layer_name: - par_new_name = layer_name + '.' + par_new_name - - yield par_new_name, par - - def layers_and_names(self, layers=None, name_prefix=''): - """ - Returns an iterator over all layers in the network. - - Includes the layer's name and itself. - - Args: - layers (str): layers to iterate over. Default: None. - name_prefix (str): Namespace. Default: ''. 
- - Examples: - >>> n = Net() - >>> names = [] - >>> for m in n.layers_and_names(): - >>> if m[0]: - >>> names.append(m[0]) - """ - t_layers = layers if layers else set() - if self in t_layers: - return - - t_layers.add(self) - yield name_prefix, self - - for name, layer in self._layers.items(): - if layer: - layers_name_prefix = name - if name_prefix: - layers_name_prefix = name_prefix + '.' + layers_name_prefix - for ele in layer.layers_and_names(t_layers, layers_name_prefix): - yield ele - - def layers(self): - """Returns an iterator over immediate layers.""" - return self.name_layers().values() - - def name_layers(self): - """ - Returns an iterator over all layers in the network. - - Include name of the layer and layer itself. - """ - value_set = set() - layers = OrderedDict() - for name, layer in self._layers.items(): - if layer is not None and layer not in value_set: - value_set.add(layer) - layers[name] = layer - return layers - - def init_build(self, *inputs, **kwargs): - """ - (1) This method must be called when the Layer has no input in_channels. - (2) Automatic shape inference when the user does not enter inchannels. - """ - - self.forward(*inputs, **kwargs) - - -class SequentialLayer(Module): - """ - Sequential layer container. - - A list of Layers will be added to it in the order they are passed in the constructor. - Alternatively, an ordered dict of layers can also be passed in. - - Args: - args (list, OrderedDict): List of subclass of Module. - - Raises: - TypeError: If the type of the argument is not list or OrderedDict. - - Inputs: - - **input** (Tensor) - Tensor with shape according to the first Module in the sequence. - - Outputs: - Tensor, the output Tensor with shape depending on the input and defined sequence of Layers. - - Examples: - >>> conv = tl.layers.Conv2d(3, 2, 3, pad_mode='valid') - >>> bn = tl.layers.BatchNorm2d(2) - >>> seq = tl.layers.SequentialLayer([conv, bn]) - >>> - >>> x = tl.layers.Input((1, 3, 4, 4)) - >>> seq(x) - """ - def __init__(self, *args): - super(SequentialLayer, self).__init__() - self._built = True - if len(args) == 1: - layers = args[0] - if isinstance(layers, list): - for index, layer in enumerate(layers): - self.insert_child_to_layer(str(index), layer) - elif isinstance(layers, OrderedDict): - for name, layer in layers.items(): - self.insert_child_to_layer(name, layer) - else: - raise TypeError('Layers must be list or orderedDict') - else: - for index, layer in enumerate(args): - self.insert_child_to_layer(str(index), layer) - self.layer_list = list(self._layers.values()) - - def __getitem__(self, index): - if isinstance(index, slice): - return self.__class__( - OrderedDict(list(self._layers.items())[index])) - index = self._valid_index(len(self), index) - return list(self._layers.values())[index] - - def __setitem__(self, index, layer): - if self._valid_module(layer): - index = self._valid_index(len(self), index) - key = list(self._layers.keys())[index] - self._layers[key] = layer - self.layer_list = list(self._layers.values()) - - def __delitem__(self, index): - if isinstance(index, int): - index = self._valid_index(len(self), index) - key = list(self._layers.keys())[index] - del self._layers[key] - elif isinstance(index, slice): - keys = list(self._layers.keys())[index] - for key in keys: - del self._layers[key] - else: - raise TypeError('Index {} is not int type or slice type'.format(index)) - self.layer_list = list(self._layers.values()) - - def __len__(self): - return len(self._layers) - - - def append(self, layer): - if 
self._valid_module(layer): - self._layers[str(len(self))] = layer - self.layer_list = list(self._layers.values()) - return self - - def build(self, inputs_shape): - pass - - def forward(self, input_data): - for layer in self.layer_list: - input_data = layer(input_data) - return input_data - - def _valid_index(self, layer_num, index): - if not isinstance(index, int): - raise TypeError("Index {} is not int type") - if not -layer_num <= index < layer_num: - raise IndexError("Index should be a number in range [{}, {}), but got {}" - .format(-layer_num, layer_num, index)) - return index % layer_num - - def _valid_module(self, layer): - if issubclass(layer.__class__, Module): - return True - raise TypeError('Module {} is not subclass of Module'.format(layer)) - - -class LayerList(Module): - """ - The class :class:`LayerList` is a linear stack of layers. - - The :class:`LayerList` can be created by passing a list of layer instances. - The given layer instances will be automatically connected one by one. - - Parameters - ---------- - layers: list of Layer - A list of layers. - name : str or None - A unique layer name. If None, a unique name will be automatically assigned. - - Methods - --------- - __init__() - Initializing the LayerList. - weights() - A collection of weights of all the layer instances. - build() - Build the LayerList. The layer instances will be connected automatically one by one. - forward() - Forward the computation. The computation will go through all layer instances. - """ - - def __init__(self, layers, name=None): - """ - Initializing the LayerList given a list of Layer. - - :param layers: list of Layer - :param name: str or None - """ - - super(LayerList, self).__init__(name=name) - self.layers = layers - is_built = True - for layer in self.layers: - self._trainable_weights.extend(layer.trainable_weights) - self._nontrainable_weights.extend(layer.nontrainable_weights) - if layer._built is False: - is_built = False - # if layer._built and layer.all_weights is not None: - # # some layers in the list passed in have already been built - # # e.g. using input shape to construct layers in dynamic eager - # if self._all_weights is None: - # self._all_weights = list() - # self._all_weights.extend(layer.all_weights) - if is_built: - self._built = True - - logging.info( - "LayerList %s including layers [%s]" % (self.name, ', '.join([layer.name for layer in self.layers])) - ) - - # check layer name uniqueness in LayerList - local_layer_name_set = set() - for layer in self.layers: - if layer.name not in local_layer_name_set: - local_layer_name_set.add(layer.name) - else: - raise ValueError( - 'Layer name \'%s\' has already been used by another layer. Please change the layer name.' % - layer.name - ) - - def __getitem__(self, idx): - if isinstance(idx, slice): - return LayerList(list(self.layers)[idx]) - else: - return self.layers[idx] - - def __len__(self): - return len(self.layers) - - def __repr__(self): - tmpstr = 'LayerList' + '(\n' - for idx, layer in enumerate(self.layers): - modstr = layer.__repr__() - modstr = _addindent(modstr, 2) - tmpstr = tmpstr + ' (' + str(idx) + '): ' + modstr + '\n' - - tmpstr = tmpstr + ')' - return tmpstr - - @property - def trainable_weights(self): - return self._trainable_weights - - @property - def nontrainable_weights(self): - return self._nontrainable_weights - - @property - def all_weights(self): - return self._trainable_weights + self._nontrainable_weights - - # def build(self, inputs_shape): - # """ - # Build the LayerList. 
The layer instances will be connected automatically one by one. - # """ - # in_tensor = self._input_tensors - # # in_layer = self._input_layer - # for layer in self.layers: - # is_build = layer._built - # out_tensor = layer(in_tensor) - # # nlayer = layer(in_layer) - # if is_build is False and layer.all_weights is not None: - # if self._all_weights is None: - # self._all_weights = list() - # self._all_weights.extend(layer.all_weights) - # layer._built = True - # in_tensor = out_tensor - # # in_layer = nlayer - - def forward(self, inputs): - """ - Forward the computation. The computation will go through all layer instances. - """ - z = inputs - for layer in self.layers: - z = layer.forward(z) - return z - - def _set_mode_for_layers(self, is_train): - """Set training/evaluation mode for all layer instances.""" - self.is_train = is_train - for layer in self.layers: - if isinstance(layer, LayerList): - layer._set_mode_for_layers(is_train) - else: - layer.is_train = is_train - - def get_args(self): - init_args = {} - layers = self.layer_args["layers"] - init_args["layers"] = [layer.config for layer in layers] - init_args.update({"layer_type": "layerlist"}) - return init_args - -def tolist(tensors): - if isinstance(tensors, list) or isinstance(tensors, tuple): - ntensors = list() - for t in tensors: - ntensors += tolist(t) - return ntensors - else: - return [tensors] - -def _addindent(s_, numSpaces): - s = s_.split('\n') - # don't do anything for single-line stuff - if len(s) == 1: - return s_ - first = s.pop(0) - s = [(numSpaces * ' ') + line for line in s] - s = '\n'.join(s) - s = first + '\n' + s - return s \ No newline at end of file diff --git a/tensorlayer/layers/core/core_mindspore.py b/tensorlayer/layers/core/core_mindspore.py index b8bfe0d50..4d346b47e 100644 --- a/tensorlayer/layers/core/core_mindspore.py +++ b/tensorlayer/layers/core/core_mindspore.py @@ -4,10 +4,9 @@ from .common import str2act, _save_weights, _load_weights from mindspore.nn import Cell import tensorlayer as tl -from tensorlayer.layers.utils import (get_variable_with_initializer) from collections import OrderedDict -__all__ = ['Module', 'SequentialLayer', 'LayerList'] +__all__ = ['Module', 'SequentialLayer'] _global_layer_name_dict = {} # TODO: better implementation? @@ -72,6 +71,9 @@ def __init__(self, name=None, act=None, *args, **kwargs): # layer forward state self._forward_state = False + # data_format + self.data_format = "NCHW" + def forward(self, *inputs, **kwargs): raise Exception("The forward method must be implemented by inherited class") @@ -81,13 +83,25 @@ def construct(self, *inputs, **kwargs): def build(self, inputs_shape): raise Exception("The build(self, inputs_shape) method must be implemented by inherited class") - def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), trainable=True): + def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), trainable=True, transposed=False): """ Get trainable variables. 
""" - weight = get_variable_with_initializer( - scope_name=self.name, var_name=var_name, shape=shape, init=init, trainable=trainable - ) + var_name = self.name + "/" + var_name + # TODO 2D mindspore weights shape : [out_channel, in_channel, kernel_h, kernel_w] + # TODO 2D mindspore transposed shape [in_channel, out_channel, kernel_h, kernel_w] + if len(shape) == 3: + shape = shape[::-1] + if len(shape) == 4: + if not transposed and self.data_format == 'NHWC': + shape = (shape[3], shape[0], shape[1], shape[2]) + else: + shape = (shape[3], shape[2], shape[0], shape[1]) + if len(shape) == 5: + shape = (shape[4], shape[3], shape[0], shape[1], shape[2]) + + initial_value = init(shape=shape) + var = tl.Variable(initial_value=initial_value, name=var_name, trainable=trainable) self.trainable = trainable - return weight + return var def save_weights(self, file_path, format=None): """Input file_path, save model weights into a file of given format.""" @@ -197,32 +211,36 @@ def all_weights(self): class SequentialLayer(Module): """ - Sequential layer container. - - A list of Layers will be added to it in the order they are passed in the constructor. - Alternatively, an ordered dict of layers can also be passed in. - - Args: - args (list, OrderedDict): List of subclass of Module. - - Raises: - TypeError: If the type of the argument is not list or OrderedDict. - - Inputs: - - **input** (Tensor) - Tensor with shape according to the first Module in the sequence. + The class :class:`SequentialLayer` is a linear stack of layers. + The :class:`SequentialLayer` can be created by passing a list of layer instances. + The given layer instances will be automatically connected one by one. + Parameters + ---------- + layers: list of Layer + A list of layers. + name : str or None + A unique layer name. If None, a unique name will be automatically assigned. + Methods + --------- + __init__() + Initializing the LayerList. + weights() + A collection of weights of all the layer instances. + build() + Build the LayerList. The layer instances will be connected automatically one by one. + forward() + Forward the computation. The computation will go through all layer instances. - Outputs: - Tensor, the output Tensor with shape depending on the input and defined sequence of Layers. + Examples + --------- + >>> conv = tl.layers.Conv2d(3, 2, 3, pad_mode='valid') + >>> bn = tl.layers.BatchNorm2d(2) + >>> seq = tl.layers.SequentialLayer([conv, bn]) + >>> x = tl.layers.Input((1, 3, 4, 4)) + >>> seq(x) - Examples: - >>> conv = tl.layers.Conv2d(3, 2, 3, pad_mode='valid') - >>> bn = tl.layers.BatchNorm2d(2) - >>> relu = tl.ReLU() - >>> seq = tl.layers.SequentialLayer([conv, bn, relu]) - >>> - >>> x = tl.layers.Input((1, 3, 4, 4)) - >>> seq(x) """ + def __init__(self, *args): super(SequentialLayer, self).__init__() # self._built = True @@ -303,53 +321,3 @@ def _valid_module(self, layer): return True raise TypeError('Module {} is not subclass of Module'.format(layer)) - -class LayerList(Module): - """ - The class :class:`LayerList` is a linear stack of layers. - - The :class:`LayerList` can be created by passing a list of layer instances. - The given layer instances will be automatically connected one by one. - - Parameters - ---------- - layers: list of Layer - A list of layers. - name : str or None - A unique layer name. If None, a unique name will be automatically assigned. - - Methods - --------- - __init__() - Initializing the LayerList. - weights() - A collection of weights of all the layer instances. 
- build() - Build the LayerList. The layer instances will be connected automatically one by one. - forward() - Forward the computation. The computation will go through all layer instances. - """ - - def __init__(self, layers, name=None): - """ - Initializing the LayerList given a list of Layer. - - :param layers: list of Layer - :param name: str or None - """ - - super(LayerList, self).__init__(name=name) - pass - - def __getitem__(self, idx): - pass - - def __len__(self): - return len(self.layers) - - def __repr__(self): - pass - - def forward(self, inputs): - pass - diff --git a/tensorlayer/layers/core/core_paddle.py b/tensorlayer/layers/core/core_paddle.py index 769053f25..b15f77ed1 100644 --- a/tensorlayer/layers/core/core_paddle.py +++ b/tensorlayer/layers/core/core_paddle.py @@ -3,6 +3,7 @@ import copy, six from .common import str2act +from .common import _save_weights, _load_weights from paddle.fluid import framework from paddle.fluid.dygraph import Layer from paddle.fluid.framework import in_dygraph_mode @@ -189,7 +190,7 @@ def __call__(self, *inputs, **kwargs): return outputs - def _get_weights(self, var_name, shape, init=None, trainable=True): + def _get_weights(self, var_name, shape, init=None, trainable=True, transposed=None): if var_name in ["filters", "weights"]: w_tmp = self.create_parameter(shape=shape, attr=init, is_bias=False) elif var_name in ["biases"]: @@ -231,4 +232,11 @@ def init_build(self, *inputs, **kwargs): (2) Automatic shape inference when the user does not enter inchannels. """ - self.forward(*inputs, **kwargs) \ No newline at end of file + self.forward(*inputs, **kwargs) + + def save_weights(self, file_path, format=None): + _save_weights(net=self, file_path=file_path, format=format) + + def load_weights(self, file_path, format=None, in_order=True, skip=False): + """Load model weights from a given file, which should be previously saved by self.save_weights().""" + _load_weights(net=self, file_path=file_path, format=format, in_order=in_order, skip=skip) \ No newline at end of file diff --git a/tensorlayer/layers/core/core_tensorflow.py b/tensorlayer/layers/core/core_tensorflow.py index 0f703881b..8eec34ebf 100644 --- a/tensorlayer/layers/core/core_tensorflow.py +++ b/tensorlayer/layers/core/core_tensorflow.py @@ -9,13 +9,37 @@ from tensorlayer.layers.utils import (get_variable_with_initializer) from tensorlayer import logging -__all__ = ['Module', 'SequentialLayer', 'LayerList'] +__all__ = ['Module', 'SequentialLayer'] _global_layer_name_dict = {} Parameter_ = tf.Variable class Module(object): + """The basic :class:`Module` class represents a single layer of a neural network. + It should be subclassed when implementing new types of layers. + Parameters + ---------- + name : str or None + A unique layer name. If None, a unique name will be automatically assigned. + Methods + --------- + __init__() + Initializing the Layer. + __call__() + Forwarding the computation. + all_weights() + Return a list of Tensor which are all weights of this Layer. + trainable_weights() + Return a list of Tensor which are all trainable weights of this Layer. + nontrainable_weights() + Return a list of Tensor which are all nontrainable weights of this Layer. + build() + Abstract method. Build the Layer. All trainable weights should be defined in this function. + forward() + Abstract method. Forward computation and return computation results. 
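+
+    Examples
+    ---------
+    A minimal sketch of a custom layer built on these abstract methods. The
+    dense computation is illustrative only; ``tl.ops.matmul`` is assumed here
+    and not confirmed by this module:
+
+    >>> class CustomDense(Module):
+    >>>     def __init__(self, n_units, in_channels, name=None):
+    >>>         super(CustomDense, self).__init__(name)
+    >>>         self.n_units = n_units
+    >>>         self.in_channels = in_channels
+    >>>         self.build((None, in_channels))
+    >>>         self._built = True
+    >>>     def build(self, inputs_shape):
+    >>>         # all trainable weights are created here via _get_weights
+    >>>         self.W = self._get_weights('weights', shape=(self.in_channels, self.n_units))
+    >>>     def forward(self, inputs):
+    >>>         return tl.ops.matmul(inputs, self.W)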
+ + """ def __init__(self, name=None, act=None, *args, **kwargs): self._params = OrderedDict() @@ -83,7 +107,9 @@ def extend_repr(self): Sets the extended representation of the Module. To print customized extended information, re-implement this method in your own Layers. + """ + return '' def __repr__(self): @@ -123,7 +149,6 @@ def __setattr__(self, name, value): del self.__dict__[name] if params and name in params: raise TypeError("Expected type is Parameter, but got Module.") - # TODO How to prompt the user, enter the in_channels. # TODO Automatic shape inference when the user does not enter inchannels. # if value._built is False: # raise AttributeError( @@ -146,8 +171,9 @@ def forward(self, *inputs, **kwargs): def build(self, inputs_shape): raise Exception("The build(self, inputs_shape) method must be implemented by inherited class") - def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), trainable=True): + def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), trainable=True, transposed=None): """ Get trainable variables. """ + weight = get_variable_with_initializer( scope_name=self.name, var_name=var_name, shape=shape, init=init, trainable=trainable ) @@ -156,10 +182,12 @@ def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), tr def save_weights(self, file_path, format=None): """Input file_path, save model weights into a file of given format.""" + _save_weights(self, file_path, format) def load_weights(self, file_path, format=None, in_order=True, skip=False): """Load model weights from a given file, which should be previously saved by self.save_weights().""" + _load_weights(self, file_path, format, in_order, skip) def _set_mode_for_layers(self, is_train): @@ -171,6 +199,7 @@ def _set_mode_for_layers(self, is_train): Network's mode. True means training mode while False means evaluation mode. """ + layers = self.layers_and_names(name_prefix='') for layer_name, layer in layers: if isinstance(layer, Module): @@ -188,6 +217,7 @@ def set_train(self): >>> net.set_train() """ + if self.is_train !=True: self.is_train = True self._set_mode_for_layers(True) @@ -200,22 +230,15 @@ def set_eval(self): -------- >>> import tensorlayer as tl >>> net = tl.vgg16() - >>> net.eval() + >>> net.set_eval() # do evaluation """ + if self.is_train != False: self.is_train = False self._set_mode_for_layers(False) - def test(self): - """Set this network in evaluation mode.""" - self.eval() - - def infer(self): - """Set this network in evaluation mode.""" - self.eval() - @staticmethod def _compute_shape(tensors): if isinstance(tensors, list): @@ -231,16 +254,17 @@ def insert_param_to_layer(self, param_name, param, check_name=True): Inserts a parameter with given name to the layer. Please refer to the usage in source code of `tensorlayer.layer.Module.__setattr__`. - Args: - param_name (str): Name of the parameter. - param (Parameter): Parameter to be inserted to the layer. - check_name (bool): Determines whether the name input is compatible. Default: True. + Parameters + ---------- + param_name : str + Name of the parameter. + param : Parameter + Parameter to be inserted to the layer. + check_name : bool + Determines whether the name input is compatible. Default: True. - Raises: - KeyError: If the name of parameter is null or contains dot. - AttributeError: If user did not call init() first. - TypeError: If the type of parameter is not Parameter_. 
""" + if not param_name: raise KeyError("The name of parameter should not be null.") if check_name and '.' in param_name: @@ -271,6 +295,7 @@ def _add_node(self, input_tensors, output_tensors): Output tensors to this layer. """ + raise NotImplementedError @property @@ -304,15 +329,10 @@ def __delattr__(self, name): def trainable_weights(self): """ Returns all trainable weights. - Returns a list of all trainable parmeters. - Args: - recurse (bool): Whether contains the trainable weights of sublayers. Default: True. - - Returns: - List, the list of trainable weights. """ + self.get_weights() layers = self.layers_and_names(name_prefix='') for layer_name, layer in layers: @@ -328,15 +348,10 @@ def trainable_weights(self): def nontrainable_weights(self): """ Returns all untrainable weights. - Returns a list of all untrainable weights. - Args: - recurse (bool): Whether contains the untrainable weights of sublayers. Default: True. - - Returns: - List, the list of untrainable weights. """ + layers = self.layers_and_names(name_prefix='') for layer_name, layer in layers: params = layer._params.items() @@ -349,6 +364,12 @@ def nontrainable_weights(self): @property def all_weights(self): + """ + Returns all weights. + Returns a list of all weights. + + """ + layers = self.layers_and_names(name_prefix='') for layer_name, layer in layers: params = layer._params.items() @@ -359,18 +380,22 @@ def all_weights(self): def get_weights(self, expand=True): """ Returns an iterator over layer weights. - Yields weights of this layer. If `expand` is True, yield parameters of this layer and all sublayers. - Args: - expand (bool): If True, yields parameters of this layer and all sublayers. Otherwise, yields only parameters - that are direct members of this layer. Default: True. + Parameters + ---------- + expand : bool + If True, yields parameters of this layer and all sublayers. Otherwise, yields only parameters + that are direct members of this layer. Default: True. + + Examples + --------- + >>> net = Net() + >>> for item in net.get_weights(): + >>> print(item) - Examples: - >>> net = Net() - >>> for item in net.get_weights(): - >>> print(item) """ + for _, param in self.parameters_and_names(expand=expand): yield param @@ -387,14 +412,15 @@ def insert_child_to_layer(self, child_name, child): """ Adds a child layer to the current layer. - Args: - child_name (str): Name of the child layer. - child (Module): The child layer to be inserted. + Parameters + ---------- + child_name : str + Name of the child layer. + child : Module + The child layer to be inserted. - Raises: - KeyError: Child Module's name is incorrect or duplicated with the other child name. - TypeError: Child Module's type is incorrect. """ + if not child_name or '.' in child_name: raise KeyError("Child layer name is incorrect.") if hasattr(self, child_name) and child_name not in self._layers: @@ -409,18 +435,24 @@ def parameters_and_names(self, name_prefix='', expand=True): Includes the parameter's name and itself. - Args: - name_prefix (str): Namespace. Default: ''. - expand (bool): If True, yields parameters of this layer and all sublayers. Otherwise, yields only parameters - that are direct members of this layer. Default: True. - - Examples: - >>> n = Net() - >>> names = [] - >>> for m in n.parameters_and_names(): - >>> if m[0]: - >>> names.append(m[0]) + Parameters + ---------- + name_prefix : str + Namespace. Default: ''. + expand : bool + If True, yields parameters of this layer and all sublayers. 
Otherwise, yields only parameters
+            that are direct members of this layer. Default: True.
+
+        Examples
+        ---------
+        >>> n = Net()
+        >>> names = []
+        >>> for m in n.parameters_and_names():
+        >>>     if m[0]:
+        >>>         names.append(m[0])
+
         """
+
         layers = []
         if expand:
             layers = self.layers_and_names(name_prefix=name_prefix)
@@ -447,17 +479,23 @@ def layers_and_names(self, layers=None, name_prefix=''):
 
         Includes the layer's name and itself.
 
-        Args:
-            layers (str): layers to iterate over. Default: None.
-            name_prefix (str): Namespace. Default: ''.
+        Parameters
+        ----------
+        layers : str
+            layers to iterate over. Default: None.
+        name_prefix : str
+            Namespace. Default: ''.
+
+        Examples
+        ---------
+        >>> n = Net()
+        >>> names = []
+        >>> for m in n.layers_and_names():
+        >>>     if m[0]:
+        >>>         names.append(m[0])
 
-        Examples:
-            >>> n = Net()
-            >>> names = []
-            >>> for m in n.layers_and_names():
-            >>>     if m[0]:
-            >>>         names.append(m[0])
         """
+
         t_layers = layers if layers else set()
         if self in t_layers:
             return
@@ -475,6 +513,7 @@ def layers_and_names(self, layers=None, name_prefix=''):
 
     def layers(self):
         """Returns an iterator over immediate layers."""
+
         return self.name_layers().values()
 
     def name_layers(self):
@@ -483,6 +522,7 @@ def name_layers(self):
 
         Include name of the layer and layer itself.
         """
+
         value_set = set()
         layers = OrderedDict()
         for name, layer in self._layers.items():
@@ -494,7 +534,7 @@ def name_layers(self):
     def init_build(self, *inputs, **kwargs):
         """
         (1) This method must be called when the Layer has no input in_channels.
-        (2) Automatic shape inference when the user does not enter inchannels.
+        (2) Automatic shape inference when the user does not enter in_channels.
         """
 
         self.forward(*inputs, **kwargs)
@@ -502,31 +542,35 @@ def init_build(self, *inputs, **kwargs):
 
 class SequentialLayer(Module):
     """
-    Sequential layer container.
-
-    A list of Layers will be added to it in the order they are passed in the constructor.
-    Alternatively, an ordered dict of layers can also be passed in.
-
-    Args:
-        args (list, OrderedDict): List of subclass of Module.
-
-    Raises:
-        TypeError: If the type of the argument is not list or OrderedDict.
-
-    Inputs:
-        - **input** (Tensor) - Tensor with shape according to the first Module in the sequence.
+    The class :class:`SequentialLayer` is a linear stack of layers.
+    The :class:`SequentialLayer` can be created by passing a list of layer instances.
+    The given layer instances will be automatically connected one by one.
+    Parameters
+    ----------
+    layers: list of Layer
+        A list of layers.
+    name : str or None
+        A unique layer name. If None, a unique name will be automatically assigned.
+    Methods
+    ---------
+    __init__()
+        Initializing the SequentialLayer.
+    weights()
+        A collection of weights of all the layer instances.
+    build()
+        Build the SequentialLayer. The layer instances will be connected automatically one by one.
+    forward()
+        Forward the computation. The computation will go through all layer instances.
- Examples: - >>> conv = tl.layers.Conv2d(3, 2, 3, pad_mode='valid') - >>> bn = tl.layers.BatchNorm2d(2) - >>> seq = tl.layers.SequentialLayer([conv, bn]) - >>> - >>> x = tl.layers.Input((1, 3, 4, 4)) - >>> seq(x) + Examples + --------- + >>> conv = tl.layers.Conv2d(3, 2, 3, pad_mode='valid') + >>> bn = tl.layers.BatchNorm2d(2) + >>> seq = tl.layers.SequentialLayer([conv, bn]) + >>> x = tl.layers.Input((1, 3, 4, 4)) + >>> seq(x) """ + def __init__(self, *args): super(SequentialLayer, self).__init__() self._built = True @@ -601,165 +645,4 @@ def _valid_index(self, layer_num, index): def _valid_module(self, layer): if issubclass(layer.__class__, Module): return True - raise TypeError('Module {} is not subclass of Module'.format(layer)) - - -class LayerList(Module): - """ - The class :class:`LayerList` is a linear stack of layers. - - The :class:`LayerList` can be created by passing a list of layer instances. - The given layer instances will be automatically connected one by one. - - Parameters - ---------- - layers: list of Layer - A list of layers. - name : str or None - A unique layer name. If None, a unique name will be automatically assigned. - - Methods - --------- - __init__() - Initializing the LayerList. - weights() - A collection of weights of all the layer instances. - build() - Build the LayerList. The layer instances will be connected automatically one by one. - forward() - Forward the computation. The computation will go through all layer instances. - """ - - def __init__(self, layers, name=None): - """ - Initializing the LayerList given a list of Layer. - - :param layers: list of Layer - :param name: str or None - """ - - super(LayerList, self).__init__(name=name) - self.layers = layers - is_built = True - for layer in self.layers: - self._trainable_weights.extend(layer.trainable_weights) - self._nontrainable_weights.extend(layer.nontrainable_weights) - if layer._built is False: - is_built = False - # if layer._built and layer.all_weights is not None: - # # some layers in the list passed in have already been built - # # e.g. using input shape to construct layers in dynamic eager - # if self._all_weights is None: - # self._all_weights = list() - # self._all_weights.extend(layer.all_weights) - if is_built: - self._built = True - - logging.info( - "LayerList %s including layers [%s]" % (self.name, ', '.join([layer.name for layer in self.layers])) - ) - - # check layer name uniqueness in LayerList - local_layer_name_set = set() - for layer in self.layers: - if layer.name not in local_layer_name_set: - local_layer_name_set.add(layer.name) - else: - raise ValueError( - 'Layer name \'%s\' has already been used by another layer. Please change the layer name.' % - layer.name - ) - - def __getitem__(self, idx): - if isinstance(idx, slice): - return LayerList(list(self.layers)[idx]) - else: - return self.layers[idx] - - def __len__(self): - return len(self.layers) - - def __repr__(self): - tmpstr = 'LayerList' + '(\n' - for idx, layer in enumerate(self.layers): - modstr = layer.__repr__() - modstr = _addindent(modstr, 2) - tmpstr = tmpstr + ' (' + str(idx) + '): ' + modstr + '\n' - - tmpstr = tmpstr + ')' - return tmpstr - - @property - def trainable_weights(self): - return self._trainable_weights - - @property - def nontrainable_weights(self): - return self._nontrainable_weights - - @property - def all_weights(self): - return self._trainable_weights + self._nontrainable_weights - - # def build(self, inputs_shape): - # """ - # Build the LayerList. 
The layer instances will be connected automatically one by one. - # """ - # in_tensor = self._input_tensors - # # in_layer = self._input_layer - # for layer in self.layers: - # is_build = layer._built - # out_tensor = layer(in_tensor) - # # nlayer = layer(in_layer) - # if is_build is False and layer.all_weights is not None: - # if self._all_weights is None: - # self._all_weights = list() - # self._all_weights.extend(layer.all_weights) - # layer._built = True - # in_tensor = out_tensor - # # in_layer = nlayer - - def forward(self, inputs): - """ - Forward the computation. The computation will go through all layer instances. - """ - z = inputs - for layer in self.layers: - z = layer.forward(z) - return z - - def _set_mode_for_layers(self, is_train): - """Set training/evaluation mode for all layer instances.""" - self.is_train = is_train - for layer in self.layers: - if isinstance(layer, LayerList): - layer._set_mode_for_layers(is_train) - else: - layer.is_train = is_train - - def get_args(self): - init_args = {} - layers = self.layer_args["layers"] - init_args["layers"] = [layer.config for layer in layers] - init_args.update({"layer_type": "layerlist"}) - return init_args - -def tolist(tensors): - if isinstance(tensors, list) or isinstance(tensors, tuple): - ntensors = list() - for t in tensors: - ntensors += tolist(t) - return ntensors - else: - return [tensors] - -def _addindent(s_, numSpaces): - s = s_.split('\n') - # don't do anything for single-line stuff - if len(s) == 1: - return s_ - first = s.pop(0) - s = [(numSpaces * ' ') + line for line in s] - s = '\n'.join(s) - s = first + '\n' + s - return s \ No newline at end of file + raise TypeError('Module {} is not subclass of Module'.format(layer)) \ No newline at end of file diff --git a/tensorlayer/layers/deprecated.py b/tensorlayer/layers/deprecated.py index 2cb6699c0..548360371 100644 --- a/tensorlayer/layers/deprecated.py +++ b/tensorlayer/layers/deprecated.py @@ -15,7 +15,7 @@ class NonExistingLayerError(Exception): 'PTRelu6Layer', ] -__log__ = '\n Hint: 1) downgrade TF and TL from version 2.x to 1.x. 2) check the documentation of TF and TL version 2.x' +__log__ = '\n Hint: 1) downgrade TL from version 3.x to 2.x. 2) check the documentation of TF version 2.x and TL version 3.x' def PReluLayer(*args, **kwargs): @@ -414,3 +414,10 @@ def UnStackLayer(*args, **kwargs): def TimeDistributedLayer(*args, **kwargs): # raise NonExistingLayerError("TimeDistributedLayer(x1, x2, name='a') --> TimeDistributed(name='a')(x1, x2)") raise NonExistingLayerError("TimeDistributedLayer is removed for TF 2.0, please use eager mode instead." 
+ __log__)
+
+__all__ += [
+    'LayerList'
+]
+
+def LayerList(*args, **kwargs):
+    raise NonExistingLayerError("LayerList(list)(input_data) --> SequentialLayer(list)(input_data)" + __log__)
\ No newline at end of file
diff --git a/tensorlayer/layers/embedding.py b/tensorlayer/layers/embedding.py
index a6b431368..249f58f18 100644
--- a/tensorlayer/layers/embedding.py
+++ b/tensorlayer/layers/embedding.py
@@ -482,11 +482,11 @@ def build(self, inputs_shape):
             init=self.E_init,
         )
         self.embedding_lookup = tl.EmbeddingLookup()
-        self.not_equal = tl.Not_equal()
+        self.not_equal = tl.NotEqual()
         self.cast = tl.Cast(tl.float32)
         self.expand_dims = tl.ExpandDims(axis=-1)
         self.reduce_sum = tl.ReduceSum(axis=1)
-        self.count_nonzero = tl.Count_nonzero(keepdims=True, dtype=tl.float32)
+        self.count_nonzero = tl.CountNonzero(keepdims=True, dtype=tl.float32)
 
     def forward(self, inputs):
         """
@@ -505,7 +505,7 @@ def forward(self, inputs):
 
         # Count number of non-padding words in each sentence
         sentence_lengths = self.count_nonzero(masks, axis=1)
 
         sentence_embeddings = tl.ops.divide(
             sum_word_embeddings,
             sentence_lengths + 1e-8,  # Add epsilon to avoid dividing by 0
@@ -514,4 +514,3 @@ def forward(self, inputs):
         outputs = sentence_embeddings
 
         return outputs
-
diff --git a/tensorlayer/layers/normalization.py b/tensorlayer/layers/normalization.py
index 5ab2e895b..1ced31579 100644
--- a/tensorlayer/layers/normalization.py
+++ b/tensorlayer/layers/normalization.py
@@ -171,6 +171,7 @@ def build(self, inputs_shape):
         self.act_init_flag = True
 
     def forward(self, inputs):
+        self._check_input_shape(inputs)
         if self._forward_state == False:
             if self._built == False:
                 self.build(tl.get_tensor_shape(inputs))
diff --git a/tensorlayer/layers/pooling.py b/tensorlayer/layers/pooling.py
index 006b34deb..d454be254 100644
--- a/tensorlayer/layers/pooling.py
+++ b/tensorlayer/layers/pooling.py
@@ -130,7 +130,6 @@ def __init__(
         strides=2,
         padding='SAME',
         data_format='channels_last',
-        dilation_rate=1,
         name=None  # 'maxpool1d'
     ):
         super().__init__(name)
@@ -138,7 +137,6 @@ def __init__(
         self.strides = self._strides = strides
         self.padding = padding
         self.data_format = data_format
-        self.dilation_rate = self._dilation_rate = dilation_rate
         self.build()
         self._built = True
@@ -150,8 +148,6 @@ def __init__(
     def __repr__(self):
         s = ('{classname}(filter_size={filter_size}'
              ', strides={strides}, padding={padding}')
-        if self.dilation_rate != 1:
-            s += ', dilation={dilation_rate}'
         if self.name is not None:
             s += ', name=\'{name}\''
         s += ')'
@@ -167,21 +163,13 @@ def build(self, inputs_shape=None):
             raise Exception("unsupported data format")
         self._filter_size = [self.filter_size]
         self._strides = [self.strides]
-        self._dilation_rate = [self.dilation_rate]
+        self.max_pool = tl.ops.MaxPool1d(ksize=self._filter_size, strides=self._strides, padding=self.padding,
+                                         data_format=self.data_format)
 
     def forward(self, inputs):
-        outputs = tl.ops.pool(
-            input=inputs,
-            window_shape=self._filter_size,
-            pooling_type="MAX",
-            strides=self._strides,
-            padding=self.padding,
-            data_format=self.data_format,
-            dilations=self._dilation_rate,
-        )
+        outputs = self.max_pool(inputs)
         return outputs
-
 
 class MeanPool1d(Module):
     """Mean pooling for 1D signal.
@@ -222,7 +210,6 @@ def __init__( self.strides = self._strides = strides self.padding = padding self.data_format = data_format - self.dilation_rate = self._dilation_rate = dilation_rate self.build() self._built = True @@ -234,8 +221,6 @@ def __init__( def __repr__(self): s = ('{classname}(filter_size={filter_size}' ', strides={strides}, padding={padding}') - if self.dilation_rate != 1: - s += ', dilation={dilation_rate}' if self.name is not None: s += ', name=\'{name}\'' s += ')' @@ -251,13 +236,13 @@ def build(self, inputs_shape=None): raise Exception("unsupported data format") self._filter_size = [self.filter_size] self._strides = [self.strides] - self._dilation_rate = [self.dilation_rate] + self.avg_pool = tl.ops.AvgPool1d(ksize=self._filter_size, + strides=self._strides, + padding=self.padding, + data_format=self.data_format) def forward(self, inputs): - outputs = tl.ops.pool( - input=inputs, window_shape=self._filter_size, pooling_type="AVG", padding=self.padding, - dilations=self._dilation_rate, strides=self._strides, data_format=self.data_format - ) + outputs = self.avg_pool(inputs) return outputs diff --git a/tensorlayer/metric/__init__.py b/tensorlayer/metric/__init__.py index c11f8323b..75a03a345 100644 --- a/tensorlayer/metric/__init__.py +++ b/tensorlayer/metric/__init__.py @@ -7,8 +7,6 @@ from .tensorflow_metric import * elif BACKEND == 'mindspore': from .mindspore_metric import * -elif BACKEND == 'dragon': - pass elif BACKEND == 'paddle': from .paddle_metric import * else: diff --git a/tensorlayer/metric/mindspore_metric.py b/tensorlayer/metric/mindspore_metric.py index bcc6499d0..710ed4e88 100644 --- a/tensorlayer/metric/mindspore_metric.py +++ b/tensorlayer/metric/mindspore_metric.py @@ -2,7 +2,6 @@ # -*- coding: utf-8 -*- import mindspore.nn as nn -from mindspore.nn.metrics._evaluation import EvaluationBase from mindspore.nn.metrics.metric import Metric __all__ = [ 'Accuracy', diff --git a/tensorlayer/models/core.py b/tensorlayer/models/core.py index e449af0be..00e5a1f24 100644 --- a/tensorlayer/models/core.py +++ b/tensorlayer/models/core.py @@ -2,24 +2,17 @@ # -*- coding: utf-8 -*- from collections.abc import Iterable -from tensorlayer.files import utils -from tensorlayer import logging +from tensorlayer.layers.core.common import _save_weights, _load_weights import tensorlayer as tl from tensorlayer.layers.core import Module import numpy as np -import os import time if tl.BACKEND == 'tensorflow': import tensorflow as tf if tl.BACKEND == 'mindspore': - import mindspore as ms from mindspore.ops import composite from mindspore.ops import operations as P - from mindspore.ops import functional as F - # from mindspore.parallel._utils import (_get_device_num, _get_mirror_mean, _get_parallel_mode) - # from mindspore.train.parallel_utils import ParallelMode - from mindspore.nn.wrap import DistributedGradReducer from mindspore.common import ParameterTuple if tl.BACKEND == 'paddle': import paddle as pd @@ -96,13 +89,18 @@ def train(self, n_epoch, train_dataset=None, test_dataset=False, print_train_bat ) def eval(self, test_dataset): - self.network.eval() + self.network.set_eval() test_loss, test_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_dataset: _logits = self.network(X_batch) test_loss += self.loss_fn(_logits, y_batch) if self.metrics: - test_acc += self.metrics(_logits, y_batch) + try: + test_acc += self.metrics(_logits, y_batch) + except: + self.metrics.update(_logits, y_batch) + test_acc += self.metrics.result() + self.metrics.reset() else: test_acc += 
np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 @@ -134,43 +132,20 @@ def save_weights(self, file_path, format=None): -------- 1) Save model weights in hdf5 format by default. >>> net = vgg16() - >>> net.save_weights('./model.h5') + >>> optimizer = tl.optimizers.Adam(learning_rate=0.001) + >>> metric = tl.metric.Accuracy() + >>> model = tl.models.Model(network=net, loss_fn=tl.cost.cross_entropy, optimizer=optimizer, metrics=metric) + >>> model.save_weights('./model.h5') ... - >>> net.load_weights('./model.h5') + >>> model.load_weights('./model.h5') 2) Save model weights in npz/npz_dict format - >>> net = vgg16() - >>> net.save_weights('./model.npz') - >>> net.save_weights('./model.npz', format='npz_dict') + >>> model.save_weights('./model.npz') + >>> model.save_weights('./model.npz', format='npz_dict') """ - # self.all_weights = self.network.all_weights - if self.all_weights is None or len(self.all_weights) == 0: - logging.warning("Model contains no weights or layers haven't been built, nothing will be saved") - return - - if format is None: - postfix = file_path.split('.')[-1] - if postfix in ['h5', 'hdf5', 'npz', 'ckpt']: - format = postfix - else: - format = 'hdf5' - - if format == 'hdf5' or format == 'h5': - utils.save_weights_to_hdf5(file_path, self) - elif format == 'npz': - utils.save_npz(self.all_weights, file_path) - elif format == 'npz_dict': - utils.save_npz_dict(self.all_weights, file_path) - elif format == 'ckpt': - # TODO: enable this when tf save ckpt is enabled - raise NotImplementedError("ckpt load/save is not supported now.") - else: - raise ValueError( - "Save format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'." - "Other format is not supported now." - ) + _save_weights(net=self, file_path=file_path, format=format) def load_weights(self, file_path, format=None, in_order=True, skip=False): """Load model weights from a given file, which should be previously saved by self.save_weights(). @@ -201,15 +176,18 @@ def load_weights(self, file_path, format=None, in_order=True, skip=False): Examples -------- 1) load model from a hdf5 file. - >>> net = tl.models.vgg16() - >>> net.load_weights('./model_graph.h5', in_order=False, skip=True) # load weights by name, skipping mismatch - >>> net.load_weights('./model_eager.h5') # load sequentially + >>> net = vgg16() + >>> optimizer = tl.optimizers.Adam(learning_rate=0.001) + >>> metric = tl.metric.Accuracy() + >>> model = tl.models.Model(network=net, loss_fn=tl.cost.cross_entropy, optimizer=optimizer, metrics=metric) + >>> model.load_weights('./model_graph.h5', in_order=False, skip=True) # load weights by name, skipping mismatch + >>> model.load_weights('./model_eager.h5') # load sequentially 2) load model from a npz file - >>> net.load_weights('./model.npz') + >>> model.load_weights('./model.npz') - 2) load model from a npz file, which is saved as npz_dict previously - >>> net.load_weights('./model.npz', format='npz_dict') + 3) load model from a npz file, which is saved as npz_dict previously + >>> model.load_weights('./model.npz', format='npz_dict') Notes ------- @@ -219,31 +197,8 @@ def load_weights(self, file_path, format=None, in_order=True, skip=False): 'in_order' argument will be ignored. 
""" - if not os.path.exists(file_path): - raise FileNotFoundError("file {} doesn't exist.".format(file_path)) - - if format is None: - format = file_path.split('.')[-1] - if format == 'hdf5' or format == 'h5': - if skip ==True or in_order == False: - # load by weights name - utils.load_hdf5_to_weights(file_path, self, skip) - else: - # load in order - utils.load_hdf5_to_weights_in_order(file_path, self) - elif format == 'npz': - utils.load_and_assign_npz(file_path, self) - elif format == 'npz_dict': - utils.load_and_assign_npz_dict(file_path, self, skip) - elif format == 'ckpt': - # TODO: enable this when tf save ckpt is enabled - raise NotImplementedError("ckpt load/save is not supported now.") - else: - raise ValueError( - "File format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. " - "Other format is not supported now." - ) + _load_weights(net=self, file_path=file_path, format=format, in_order=in_order, skip=skip) def tf_train( self, n_epoch, train_dataset, network, loss_fn, train_weights, optimizer, metrics, print_train_batch, @@ -287,7 +242,7 @@ def tf_train( if test_dataset: # use training and evaluation sets to evaluate the model every print_freq epoch if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - network.eval() + network.set_eval() val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_dataset: _logits = network(X_batch) # is_train=False, disable dropout @@ -340,7 +295,7 @@ def ms_train( if test_dataset: # use training and evaluation sets to evaluate the model every print_freq epoch if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - network.eval() + network.set_eval() val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_dataset: _logits = network(X_batch) @@ -394,7 +349,7 @@ def pd_train( if test_dataset: # use training and evaluation sets to evaluate the model every print_freq epoch if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - network.eval() + network.set_eval() val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_dataset: _logits = network(X_batch) # is_train=False, disable dropout diff --git a/tensorlayer/optimizers/dragon_optimizers.py b/tensorlayer/optimizers/dragon_optimizers.py deleted file mode 100644 index 523e785f8..000000000 --- a/tensorlayer/optimizers/dragon_optimizers.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import absolute_import, division, print_function -import dragon as dg - -__all__ = ['Adadelta', 'Adagrad', 'Adam', 'Admax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS'] - -# Add module aliases - - -# learning_rate=0.001, rho=0.95, epsilon=1e-07, name='Adadelta' -def Adadelta(**kwargs): - raise NotImplementedError('Adadelta optimizer function not implemented') - - -# learning_rate=0.001, initial_accumulator_value=0.1, epsilon=1e-07,name='Adagrad' -def Adagrad(**kwargs): - raise NotImplementedError('Adagrad optimizer function not implemented') - - -# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False,name='Adam' -Adam = dg.optimizers.Adam - - -# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Adamax' -def Admax(**kwargs): - raise NotImplementedError('Admax optimizer function not implemented') - - -# learning_rate=0.001, learning_rate_power=-0.5, initial_accumulator_value=0.1, -# l1_regularization_strength=0.0, l2_regularization_strength=0.0, name='Ftrl',l2_shrinkage_regularization_strength=0.0 -def Ftrl(**kwargs): - raise NotImplementedError('Ftrl optimizer function not implemented') - - -# learning_rate=0.001, beta_1=0.9, beta_2=0.999, 
epsilon=1e-07, name='Nadam', -def Nadam(**kwargs): - raise NotImplementedError('Nadam optimizer function not implemented') - - -# learning_rate=0.001, rho=0.9, momentum=0.0, epsilon=1e-07, centered=False,name='RMSprop' -RMSprop = dg.optimizers.RMSprop - -# learning_rate=0.01, momentum=0.0, nesterov=False, name='SGD' -SGD = dg.optimizers.SGD - - -# learning_rate, momentum, use_locking=False, name='Momentum', use_nesterov=False -def Momentum(**kwargs): - raise NotImplementedError('Momentum optimizer function not implemented') - - -def Lamb(**kwargs): - raise NotImplementedError('Lamb optimizer function not implemented') - - -def LARS(**kwargs): - raise NotImplementedError('LARS optimizer function not implemented') diff --git a/tensorlayer/optimizers/load_optimizers_backend.py b/tensorlayer/optimizers/load_optimizers_backend.py index 31a905aaa..0fc4c0892 100644 --- a/tensorlayer/optimizers/load_optimizers_backend.py +++ b/tensorlayer/optimizers/load_optimizers_backend.py @@ -8,8 +8,6 @@ from .tensorflow_optimizers import * elif BACKEND == 'mindspore': from .mindspore_optimizers import * -elif BACKEND == 'dragon': - from .dragon_optimizers import * elif BACKEND == 'paddle': from .paddle_optimizers import * else: diff --git a/tests/layers/test_layers_pooling.py b/tests/layers/test_layers_pooling.py index 65643fca9..de61d52bc 100644 --- a/tests/layers/test_layers_pooling.py +++ b/tests/layers/test_layers_pooling.py @@ -33,15 +33,15 @@ def setUpClass(cls): n19 = tl.layers.AdaptiveMeanPool1d(output_size=44, name='test_adaptivemeanpool1d')(n1) n20 = tl.layers.AdaptiveMaxPool1d(output_size=44, name='test_adaptivemaxpool1d')(n1) - cls.n1_shape = n1.get_shape().as_list() - cls.n2_shape = n2.get_shape().as_list() - cls.n3_shape = n3.get_shape().as_list() - cls.n4_shape = n4.get_shape().as_list() - cls.n5_shape = n5.get_shape().as_list() - cls.n16_shape = n16.get_shape().as_list() - cls.n17_shape = n17.get_shape().as_list() - cls.n19_shape = n19.get_shape().as_list() - cls.n20_shape = n20.get_shape().as_list() + cls.n1_shape = tl.get_tensor_shape(n1) + cls.n2_shape = tl.get_tensor_shape(n2) + cls.n3_shape = tl.get_tensor_shape(n3) + cls.n4_shape = tl.get_tensor_shape(n4) + cls.n5_shape = tl.get_tensor_shape(n5) + cls.n16_shape = tl.get_tensor_shape(n16) + cls.n17_shape = tl.get_tensor_shape(n17) + cls.n19_shape = tl.get_tensor_shape(n19) + cls.n20_shape = tl.get_tensor_shape(n20) ## 2D ======================================================================== @@ -58,15 +58,14 @@ def setUpClass(cls): n21 = tl.layers.AdaptiveMeanPool2d(output_size=(45, 32), name='test_adaptivemeanpool2d')(n6) n22 = tl.layers.AdaptiveMaxPool2d(output_size=(45, 32), name='test_adaptivemaxpool2d')(n6) - cls.n6_shape = n6.get_shape().as_list() - cls.n7_shape = n7.get_shape().as_list() - cls.n8_shape = n8.get_shape().as_list() - cls.n9_shape = n9.get_shape().as_list() - cls.n10_shape = n10.get_shape().as_list() - cls.n15_shape = n15.get_shape().as_list() - # cls.n18_shape = n18.get_shape().as_list() - cls.n21_shape = n21.get_shape().as_list() - cls.n22_shape = n22.get_shape().as_list() + cls.n6_shape = tl.get_tensor_shape(n6) + cls.n7_shape = tl.get_tensor_shape(n7) + cls.n8_shape = tl.get_tensor_shape(n8) + cls.n9_shape = tl.get_tensor_shape(n9) + cls.n10_shape = tl.get_tensor_shape(n10) + cls.n15_shape = tl.get_tensor_shape(n15) + cls.n21_shape = tl.get_tensor_shape(n21) + cls.n22_shape = tl.get_tensor_shape(n22) ## 3D ======================================================================== From 
9bd13f9eaf748ee30f78b2bbe2c7b80db8a44a24 Mon Sep 17 00:00:00 2001
From: Eric Lai 
Date: Wed, 16 Jun 2021 10:15:46 +0800
Subject: [PATCH 13/36] update readme

---
 README.md                                       | 16 +++++++++++++---
 requirements/requirements_multiple_backends.txt |  2 ++
 2 files changed, 15 insertions(+), 3 deletions(-)
 create mode 100644 requirements/requirements_multiple_backends.txt

diff --git a/README.md b/README.md
index cd9d91546..ff0da4e04 100644
--- a/README.md
+++ b/README.md
@@ -21,12 +21,12 @@

-[TensorLayer](https://tensorlayer.readthedocs.io) is a novel TensorFlow-based deep learning and reinforcement learning library designed for researchers and engineers. It provides an extensive collection of customizable neural layers to build advanced AI models quickly, based on this, the community open-sourced mass [tutorials](https://github.com/tensorlayer/tensorlayer/blob/master/examples/reinforcement_learning/README.md) and [applications](https://github.com/tensorlayer). TensorLayer is awarded the 2017 Best Open Source Software by the [ACM Multimedia Society](https://twitter.com/ImperialDSI/status/923928895325442049).
+[TensorLayer](https://tensorlayer.readthedocs.io) is a novel multi-backend deep learning and reinforcement learning library designed for researchers and engineers. It provides an extensive collection of customizable neural layers to build advanced AI models quickly. Building on this, the community has open-sourced a large number of [tutorials](https://github.com/tensorlayer/tensorlayer/blob/master/examples/reinforcement_learning/README.md) and [applications](https://github.com/tensorlayer). TensorLayer was awarded the 2017 Best Open Source Software by the [ACM Multimedia Society](https://twitter.com/ImperialDSI/status/923928895325442049).
 This project can also be found at [iHub](https://code.ihub.org.cn/projects/328) and [Gitee](https://gitee.com/organizations/TensorLayer).

 # News

-🔥 **3.0.0 will supports multiple backends, such as TensorFlow, MindSpore and more, allowing users to run the code on different hardware like Nvidia-GPU and Huawei-Ascend. We need more people to join the dev team, if you are interested, please email hao.dong@pku.edu.cn**
+🔥 **3.0.0 will support multiple backends, such as TensorFlow, MindSpore, PaddlePaddle and more, allowing users to run the code on different hardware like Nvidia-GPU and Huawei-Ascend. We need more people to join the dev team; if you are interested, please email hao.dong@pku.edu.cn**

 🔥 Reinforcement Learning Zoo: [Low-level APIs](https://github.com/tensorlayer/tensorlayer/tree/master/examples/reinforcement_learning) for professional usage, [High-level APIs](https://github.com/tensorlayer/RLzoo) for simple usage, and a corresponding [Springer textbook](http://springer.com/gp/book/9789811540943)

@@ -72,7 +72,7 @@ You can find a large collection of examples that use TensorLayer in [here](examp

 # Getting Start

-TensorLayer 2.0 relies on TensorFlow, numpy, and others. To use GPUs, CUDA and cuDNN are required.
+TensorLayer 3.0 relies on TensorFlow, numpy, and others. To use GPUs, CUDA and cuDNN are required.
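+
+Since 3.0 the backend is selected at import time. A minimal sketch of switching it is shown below; the `TL_BACKEND` environment variable name is an assumption here, see `tensorlayer/backend/ops/load_backend.py` for the exact mechanism:
+
+```python
+# Hypothetical backend switch; the environment variable name is assumed, not confirmed.
+import os
+os.environ['TL_BACKEND'] = 'tensorflow'  # or 'mindspore' / 'paddle'
+
+import tensorlayer as tl
+print(tl.BACKEND)  # the active backend, as read by load_backend.py
+```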
Install TensorFlow: @@ -99,6 +99,15 @@ pip3 install --upgrade tensorlayer[all] # all additional dependenci pip3 install --upgrade tensorlayer[extra] # only the `extra` dependencies pip3 install --upgrade tensorlayer[contrib_loggers] # only the `contrib_loggers` dependencies ``` +If you want to use mindspore backend, you should install mindspore>=1.2.0 +```bash +pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.2.1/MindSpore/gpu/ubuntu_x86/cuda-10.1/mindspore_gpu-1.2.1-cp37-cp37m-linux_x86_64.whl --trusted-host ms-release.obs.cn-north-4.myhuaweicloud.com -i https://pypi.tuna.tsinghua.edu.cn/simple +``` + +if you want to use paddlepaddle backend, you should install paddlepaddle>=2.0 +```bash +python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` If you are TensorFlow 1.X users, you can use TensorLayer 1.11.0: @@ -150,6 +159,7 @@ The following table shows the training speeds of [VGG16](http://www.robots.ox.ac | Graph | Keras | channel last | 8677 | 2580 | 2576 | 101 | | Eager | TensorFlow 2.0 | channel last | 8723 | 2052 | 2024 | 97 | | | TensorLayer 2.0 | channel last | 8723 | 2010 | 2007 | 95 | +| | TensorLayer 3.0 | channel last | | | | | # Getting Involved diff --git a/requirements/requirements_multiple_backends.txt b/requirements/requirements_multiple_backends.txt new file mode 100644 index 000000000..c75ad53a2 --- /dev/null +++ b/requirements/requirements_multiple_backends.txt @@ -0,0 +1,2 @@ +mindspore==1.2.0 +paddlepaddle==2.1.0 \ No newline at end of file From 924e2b6ab6a6df5600c6afec6d714caeb7e22a30 Mon Sep 17 00:00:00 2001 From: Eric Lai Date: Mon, 21 Jun 2021 15:45:14 +0800 Subject: [PATCH 14/36] update tensorlayer3 --- README.md | 2 +- docs/modules/activation.rst | 79 +- docs/modules/app.rst | 10 - docs/modules/initializers.rst | 5 + docs/modules/layers.rst | 57 +- docs/modules/models.rst | 49 +- docs/modules/optimizers.rst | 13 + docs/user/examples.rst | 38 +- docs/user/get_start_advance.rst | 218 +-- docs/user/get_start_model.rst | 208 ++- docs/user/installation.rst | 22 +- .../tutorial_mnist_mlp_mindspore.py | 117 -- ...utorial_mnist_mlp_paddlepaddle_backend.py} | 0 examples/model_zoo/__init__.py | 6 + examples/model_zoo/pretrained_resnet50.py | 2 +- examples/model_zoo/pretrained_yolov4.py | 3 + examples/model_zoo/resnet.py | 32 +- examples/model_zoo/vgg.py | 64 +- examples/model_zoo/yolo.py | 10 +- .../requirements_multiple_backends.txt | 2 - requirements/requirements_paddle.txt | 1 + tensorlayer/backend/__init__.py | 2 +- tensorlayer/backend/ops/load_backend.py | 4 +- tensorlayer/backend/ops/mindspore_backend.py | 10 +- tensorlayer/backend/ops/mindspore_nn.py | 129 +- tensorlayer/backend/ops/paddle_backend.py | 5 + tensorlayer/backend/ops/paddle_nn.py | 1 + tensorlayer/backend/ops/tensorflow_backend.py | 3 + tensorlayer/backend/ops/tensorflow_nn.py | 1 + tensorlayer/cost/paddle_cost.py | 8 +- tensorlayer/cost/tensorflow_cost.py | 17 +- tensorlayer/dataflow/paddle_data.py | 2 +- tensorlayer/files/utils.py | 1 + tensorlayer/initializers/__init__.py | 2 +- .../initializers/mindspore_initializers.py | 2 + .../initializers/paddle_initializers.py | 6 +- .../initializers/tensorflow_initializers.py | 55 + tensorlayer/layers/activation.py | 46 +- tensorlayer/layers/convolution/binary_conv.py | 1 - .../layers/convolution/deformable_conv.py | 11 +- tensorlayer/layers/convolution/group_conv.py | 3 + tensorlayer/layers/convolution/quan_conv.py | 5 +- .../layers/convolution/quan_conv_bn.py | 2 +- .../layers/convolution/separable_conv.py | 6 +- 
 .../layers/convolution/simplified_conv.py      |    1 -
 .../layers/convolution/ternary_conv.py         |    6 +-
 tensorlayer/layers/core/common.py              |    2 +
 tensorlayer/layers/core/core_mindspore.py      |   13 +-
 tensorlayer/layers/core/core_paddle.py         |   24 +-
 tensorlayer/layers/core/core_tensorflow.py     |   15 +-
 tensorlayer/layers/dense/base_dense.py         |    5 +-
 tensorlayer/layers/dense/binary_dense.py       |    9 +-
 tensorlayer/layers/dense/dorefa_dense.py       |   10 +-
 tensorlayer/layers/dense/dropconnect.py        |   14 +-
 tensorlayer/layers/dense/quan_dense.py         |    8 +
 tensorlayer/layers/dense/quan_dense_bn.py      |    8 +-
 tensorlayer/layers/deprecated.py               |   22 +-
 tensorlayer/layers/dropout.py                  |    5 +
 tensorlayer/layers/embedding.py                |   22 +-
 tensorlayer/layers/extend.py                   |    2 +-
 tensorlayer/layers/image_resampling.py         |   24 +-
 tensorlayer/layers/inputs.py                   |    7 +
 tensorlayer/layers/lambda_layers.py            |   20 +-
 tensorlayer/layers/merge.py                    |   16 +-
 tensorlayer/layers/normalization.py            |   22 +-
 tensorlayer/layers/padding.py                  |   19 +-
 tensorlayer/layers/pooling.py                  |   93 +-
 tensorlayer/layers/quantize.py                 |    3 -
 tensorlayer/layers/recurrent.py                | 1258 +++++++++++++++++
 tensorlayer/layers/shape.py                    |    5 -
 tensorlayer/layers/stack.py                    |   11 +-
 tensorlayer/layers/utils.py                    |    4 +-
 tensorlayer/models/__init__.py                 |    2 +-
 tensorlayer/models/core.py                     |   34 +-
 ...et50_weights_tf_dim_ordering_tf_kernels.h5  | Bin 24576 -> 0 bytes
 tensorlayer/package_info.py                    |    2 +-
 76 files changed, 2060 insertions(+), 886 deletions(-)
 delete mode 100644 docs/modules/app.rst
 delete mode 100644 examples/basic_tutorials/tutorial_mnist_mlp_mindspore.py
 rename examples/basic_tutorials/{tutorial_paddle_tensorlayer_mlp.py => tutorial_mnist_mlp_paddlepaddle_backend.py} (100%)
 delete mode 100644 requirements/requirements_multiple_backends.txt
 create mode 100644 requirements/requirements_paddle.txt
 delete mode 100644 tensorlayer/models/models/resnet50_weights_tf_dim_ordering_tf_kernels.h5

diff --git a/README.md b/README.md
index ff0da4e04..b3fb68630 100644
--- a/README.md
+++ b/README.md
@@ -104,7 +104,7 @@ If you want to use mindspore backend, you should install mindspore>=1.2.0
 pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.2.1/MindSpore/gpu/ubuntu_x86/cuda-10.1/mindspore_gpu-1.2.1-cp37-cp37m-linux_x86_64.whl --trusted-host ms-release.obs.cn-north-4.myhuaweicloud.com -i https://pypi.tuna.tsinghua.edu.cn/simple
 ```

-if you want to use paddlepaddle backend, you should install paddlepaddle>=2.0
+If you want to use paddlepaddle backend, you should install paddlepaddle>=2.0
 ```bash
 python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
 ```
diff --git a/docs/modules/activation.rst b/docs/modules/activation.rst
index 79bad9601..be250d8cb 100644
--- a/docs/modules/activation.rst
+++ b/docs/modules/activation.rst
@@ -2,9 +2,7 @@ API - Activations
 =========================

 To make TensorLayer simple, we minimize the number of activation functions as much as
-we can. So we encourage you to use TensorFlow's function. TensorFlow provides
-``tf.nn.relu``, ``tf.nn.relu6``, ``tf.nn.elu``, ``tf.nn.softplus``,
-``tf.nn.softsign`` and so on.
+we can. So we encourage you to write your own customized activation functions.
 For parametric activation, please read the layer APIs.

 The shortcut of ``tensorlayer.activation`` is ``tensorlayer.act``.
@@ -14,64 +12,71 @@ Your activation

 Customizing an activation function in TensorLayer is very easy.
 The following example implements an activation that multiplies its input by 2.
-For more complex activation, TensorFlow API will be required.
+For more complex activation, TensorFlow(MindSpore/PaddlePaddle) API will be required. .. code-block:: python - def double_activation(x): - return x * 2 - - double_activation = lambda x: x * 2 + class DoubleActivation(object): + def __init__(self): + pass + def __call__(self, x): + return x * 2 + double_activation = DoubleActivation() -.. automodule:: tensorlayer.activation +.. automodule:: tensorlayer.layers.activation .. autosummary:: - leaky_relu - leaky_relu6 - leaky_twice_relu6 - ramp - swish - sign - hard_tanh - pixel_wise_softmax - mish - -Ramp + PRelu + PRelu6 + PTRelu6 + LeakyReLU + LeakyReLU6 + LeakyTwiceRelu6 + Ramp + Swish + HardTanh + Mish + +PRelu ------ -.. autofunction:: ramp +.. autofunction:: PRelu -Leaky ReLU +PRelu6 ------------ -.. autofunction:: leaky_relu +.. autofunction:: PRelu6 -Leaky ReLU6 +PTRelu6 ------------ -.. autofunction:: leaky_relu6 +.. autofunction:: PTRelu6 -Twice Leaky ReLU6 +LeakyReLU ----------------- -.. autofunction:: leaky_twice_relu6 +.. autofunction:: LeakyReLU -Swish +LeakyReLU6 ------------ -.. autofunction:: swish +.. autofunction:: LeakyReLU6 -Sign +LeakyTwiceRelu6 --------------------- -.. autofunction:: sign +.. autofunction:: LeakyTwiceRelu6 -Hard Tanh +Ramp --------------------- -.. autofunction:: hard_tanh +.. autofunction:: Ramp -Pixel-wise softmax +Swish -------------------- -.. autofunction:: pixel_wise_softmax +.. autofunction:: Swish + +HardTanh +---------------- +.. autofunction:: HardTanh -mish +Mish --------- -.. autofunction:: mish +.. autofunction:: Mish Parametric activation ------------------------------ diff --git a/docs/modules/app.rst b/docs/modules/app.rst deleted file mode 100644 index d636292e8..000000000 --- a/docs/modules/app.rst +++ /dev/null @@ -1,10 +0,0 @@ -API - Application Library -========================= - -Application library is an open source Deep learning applications based on TensorLayer. - -Supported Application: -------------------------- - - - diff --git a/docs/modules/initializers.rst b/docs/modules/initializers.rst index 6311619f2..3bf421337 100644 --- a/docs/modules/initializers.rst +++ b/docs/modules/initializers.rst @@ -16,6 +16,7 @@ e.g. ``tf.initializers.he_normal``, please refer to TensorFlow provided initiali RandomUniform RandomNormal TruncatedNormal + HeNormal deconv2d_bilinear_upsampling_initializer Initializer @@ -46,6 +47,10 @@ TruncatedNormal --------------------- .. autoclass:: TruncatedNormal +HeNormal +------------ +.. autoclass:: HeNormal + deconv2d_bilinear_upsampling_initializer ------------------------------------------ .. autofunction:: deconv2d_bilinear_upsampling_initializer diff --git a/docs/modules/layers.rst b/docs/modules/layers.rst index 78e0eee9a..8f08aefde 100644 --- a/docs/modules/layers.rst +++ b/docs/modules/layers.rst @@ -12,10 +12,9 @@ Layer list .. autosummary:: - Layer + Module - ModelLayer - LayerList + SequentialLayer Input @@ -73,14 +72,6 @@ Layer list BatchNorm1d BatchNorm2d BatchNorm3d - LocalResponseNorm - InstanceNorm - InstanceNorm1d - InstanceNorm2d - InstanceNorm3d - LayerNorm - GroupNorm - SwitchNorm RNN SimpleRNN @@ -134,17 +125,13 @@ Layer list Base Layer ----------- -Base Layer -^^^^^^^^^^^^^^^^ -.. autoclass:: Layer - -Model Layer +Module ^^^^^^^^^^^^^^^^ -.. autoclass:: ModelLayer +.. autoclass:: Module -Layer List +Sequential Layer ^^^^^^^^^^^^^^^^ -.. autoclass:: LayerList +.. autoclass:: SequentialLayer .. ----------------------------------------------------------- .. 
Input Layer
@@ -399,38 +386,6 @@ Batch Normalization 3D
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 .. autoclass:: BatchNorm3d

-Local Response Normalization
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. autoclass:: LocalResponseNorm
-
-Instance Normalization
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. autoclass:: InstanceNorm
-
-Instance Normalization 1D
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. autoclass:: InstanceNorm1d
-
-Instance Normalization 2D
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. autoclass:: InstanceNorm2d
-
-Instance Normalization 3D
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. autoclass:: InstanceNorm3d
-
-Layer Normalization
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. autoclass:: LayerNorm
-
-Group Normalization
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. autoclass:: GroupNorm
-
-Switch Normalization
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. autoclass:: SwitchNorm
-
 .. -----------------------------------------------------------
 ..                        Padding Layers
 .. -----------------------------------------------------------

diff --git a/docs/modules/models.rst b/docs/modules/models.rst
index 272f1d9c6..821d30c75 100644
--- a/docs/modules/models.rst
+++ b/docs/modules/models.rst
@@ -1,59 +1,34 @@
-API - Models
+API - Pretrained Models
 ================================

 TensorLayer provides many pretrained models; you can easily use the whole or a part of the pretrained models via these APIs.

-.. automodule:: tensorlayer.models
+.. automodule:: examples.model_zoo

 .. autosummary::

-   Model
-
-   VGG16
-   VGG19
-   SqueezeNetV1
-   MobileNetV1
+   vgg16
+   vgg19
+   YOLOv4
    ResNet50

-   Seq2seq
-   Seq2seqLuongAttention
-
-
-Base Model
------------
-.. autoclass:: Model
-
-VGG16
+vgg16
 ----------------------
-.. autofunction:: VGG16
+.. autofunction:: vgg16

-VGG19
+vgg19
 ----------------------
-.. autofunction:: VGG19
-
-SqueezeNetV1
-----------------
-.. autofunction:: SqueezeNetV1
+.. autofunction:: vgg19

-MobileNetV1
+YOLOv4
 ----------------
-.. autofunction:: MobileNetV1
+.. autofunction:: YOLOv4

 ResNet50
 ----------------
-.. autofunction:: ResNet50
-
-Seq2seq
-------------------------
-
-.. autoclass:: Seq2seq
-
-
-Seq2seq Luong Attention
-------------------------
-
-.. autoclass:: Seq2seqLuongAttention
+.. autofunction:: ResNet50

diff --git a/docs/modules/optimizers.rst b/docs/modules/optimizers.rst
index 0ababc899..9f272d39c 100644
--- a/docs/modules/optimizers.rst
+++ b/docs/modules/optimizers.rst
@@ -5,6 +5,8 @@ API - Optimizers

 TensorLayer provides simple API and tools to ease research, development and reduce the time to production.
 Therefore, we provide the latest state of the art optimizers that work with Tensorflow.
+The optimizer functions provided by TensorFlow can be used in TensorLayer.
+We have also wrapped the optimizer functions for each framework; they can be found in ``tensorlayer.optimizers``.

 Optimizers List
 ---------------
@@ -12,6 +14,17 @@ Optimizers List
 .. autosummary::

    AMSGrad
+   Adadelta
+   Adagrad
+   Adam
+   Adamax
+   Ftrl
+   Nadam
+   RMSprop
+   SGD
+   Momentum
+   Lamb
+   LARS

 AMSGrad Optimizer
 -----------------
diff --git a/docs/user/examples.rst b/docs/user/examples.rst
index 91971c0a0..80c3e8b8b 100644
--- a/docs/user/examples.rst
+++ b/docs/user/examples.rst
@@ -6,13 +6,28 @@ Examples

 We list some examples here, but more tutorials and applications can be found in `Github examples `__ and `Awesome-TensorLayer `_.

+Commonly used datasets and pretrained models
+============================================
+
+ - MNIST, see `MNIST `__.
+ - CIFAR10, see `CIFAR10 `__.
+
+ - YOLOv4 Pretrained Model, see `YOLOv4 `__.
password: idsz
+ - VGG16 Pretrained Model, see `VGG16 `__. password: t36u
+ - VGG19 Pretrained Model, see `VGG19 `__. password: rb8w
+ - ResNet50 Pretrained Model, see `ResNet50 `__. password: 3nui
+
 Basics
 ============

- - Multi-layer perceptron (MNIST), simple usage. Classification task, see `tutorial_mnist_simple.py `__.
- - Multi-layer perceptron (MNIST), dynamic model. Classification with dropout using iterator, see `tutorial_mnist_mlp_dynamic.py method2 `__.
- - Multi-layer perceptron (MNIST), static model. Classification with dropout using iterator, see `tutorial_mnist_mlp_static.py `__.
- - Convolutional Network (CIFAR-10). Classification task, see `tutorial_cifar10_cnn_static.py `_.
+ - Multi-layer perceptron (MNIST), simple usage, supports multiple backends. Classification task, see `tutorial_mnist_simple.py `__.
+ - Multi-layer perceptron (MNIST), mix of TensorLayer and TensorFlow. Classification with dropout using iterator, see `tutorial_mnist_mlp_tensorflow_backend.py `__.
+ - Multi-layer perceptron (MNIST), mix of TensorLayer and MindSpore. Classification task, see `tutorial_mnist_mlp_mindspore_backend.py `__.
+ - Multi-layer perceptron (MNIST), mix of TensorLayer and PaddlePaddle. Classification task, see `tutorial_mnist_mlp_paddlepaddle_backend.py `__.
+
+ - Convolutional Network (CIFAR-10), mix of TensorLayer and TensorFlow. Classification task, see `tutorial_cifar10_cnn_tensorflow_backend.py `_.
+ - Convolutional Network (CIFAR-10), mix of TensorLayer and MindSpore. Classification task, see `tutorial_cifar10_cnn_mindspore_backend.py `_.
+
 - TensorFlow dataset API for object detection, see `here `__.
 - Data augmentation with TFRecord. Effective way to load and pre-process data, see `tutorial_tfrecord*.py `__ and `tutorial_cifar10_tfrecord.py `__.
 - Data augmentation with TensorLayer. See `tutorial_fast_affine_transform.py `__ (for quick test only).
@@ -20,15 +35,16 @@ Basics

 Pretrained Models
 ==================

- - VGG 16 (ImageNet). Classification task, see `tutorial_models_vgg16 `__.
+ - VGG 16 (ImageNet). Classification task, see `pretrained_vgg16 `__.
 - VGG 19 (ImageNet). Classification task, see `tutorial_models_vgg19.py `__.
- - SqueezeNet (ImageNet). Model compression, see `tutorial_models_squeezenetv1.py `__.
- - MobileNet (ImageNet). Model compression, see `tutorial_models_mobilenetv1.py `__.
+ - YOLOv4 (MS-COCO). Object Detection, see `pretrained_yolov4.py `__.
+ - SqueezeNet (ImageNet, based on TensorLayer 2.0). Model compression, see `tutorial_models_squeezenetv1.py `__.
+ - MobileNet (ImageNet, based on TensorLayer 2.0). Model compression, see `tutorial_models_mobilenetv1.py `__.
 - All pretrained models in `pretrained-models `__.

 Vision
 ==================
-
+Warning: These examples below only support TensorLayer 2.0. TensorLayer 3.0 is under development.
 - Arbitrary Style Transfer in Real-time with Adaptive Instance Normalization, see `examples `__.
 - ArcFace: Additive Angular Margin Loss for Deep Face Recognition, see `InsignFace `__.
 - BinaryNet. Model compression, see `mnist `__ `cifar10 `__.
@@ -44,6 +60,7 @@ Vision

 Adversarial Learning
 ========================
+Warning: These examples below only support TensorLayer 2.0. TensorLayer 3.0 is under development.
 - DCGAN (CelebA). Generating images by `Deep Convolutional Generative Adversarial Networks `__ by `zsdonghao `__.
 - `Generative Adversarial Text to Image Synthesis `__ by `zsdonghao `__.
 - `Unsupervised Image to Image Translation with Generative Adversarial Networks `__ by `zsdonghao `__.
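As a concrete companion to the pretrained-model list above, a minimal inference sketch using the ``examples.model_zoo`` API introduced by this patch series (every call below appears elsewhere in this series; the image path is illustrative):

.. code-block:: python

    import tensorlayer as tl
    from examples.model_zoo import vgg16

    vgg = vgg16(pretrained=True)                # restore pre-trained ImageNet weights
    vgg.set_eval()                              # inference mode (disable dropout etc.)
    img = tl.vis.read_image('data/tiger.jpeg')  # illustrative image path
    img = tl.prepro.imresize(img, (224, 224)).astype(tl.float32) / 255
    probs = tl.ops.softmax(vgg(img))[0].numpy() # class probabilities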
@@ -54,7 +71,7 @@ Adversarial Learning

 Natural Language Processing
 ==============================
-
+Warning: These examples below only support TensorLayer 2.0. TensorLayer 3.0 is under development.
 - Recurrent Neural Network (LSTM). Apply multiple LSTM to PTB dataset for language modeling, see `tutorial_ptb_lstm_state_is_tuple.py `__.
 - Word Embedding (Word2vec). Train a word embedding matrix, see `tutorial_word2vec_basic.py `__.
 - Restore Embedding matrix. Restore a pre-train embedding matrix, see `tutorial_generate_text.py `__.
@@ -65,7 +82,7 @@ Natural Language Processing

 Reinforcement Learning
 ==============================
-
+Warning: These examples below only support TensorLayer 2.0. TensorLayer 3.0 is under development.
 - Policy Gradient / Network (Atari Ping Pong), see `tutorial_atari_pong.py `__.
 - Deep Q-Network (Frozen lake), see `tutorial_frozenlake_dqn.py `__.
 - Q-Table learning algorithm (Frozen lake), see `tutorial_frozenlake_q_table.py `__.
@@ -77,6 +94,7 @@ Reinforcement Learning

 Miscellaneous
 =================
+Warning: These examples below only support TensorLayer 2.0. TensorLayer 3.0 is under development.

 - `Sipeed `__ : Run TensorLayer on AI Chips

diff --git a/docs/user/get_start_advance.rst b/docs/user/get_start_advance.rst
index db3441cde..1dae18a7a 100644
--- a/docs/user/get_start_advance.rst
+++ b/docs/user/get_start_advance.rst
@@ -11,11 +11,13 @@ Customizing layer
 Layers with weights
 ----------------------

-The fully-connected layer is `a = f(x*W+b)`, the most simple implementation is as follow, which can only support static model.
+The fully-connected layer is `a = f(x*W+b)`; the simplest implementation is as follows.

 .. code-block:: python

-  class Dense(Layer):
+  from tensorlayer.layers import Module
+
+  class Dense(Module):
    """The :class:`Dense` class is a fully connected layer.

    Parameters
    ----------
    n_units : int
      The number of units of this layer.
    act : activation function
      The activation function of this layer.
@@ -33,12 +35,16 @@ The fully-connected layer is `a = f(x*W+b)`, the most simple implementation is a
        n_units,  # the number of units/channels of this layer
        act=None,  # None: no activation, tf.nn.relu or 'relu': ReLU ...
        name=None,  # the name of this layer (optional)
+       in_channels = None
    ):
        super(Dense, self).__init__(name, act=act)  # auto naming, dense_1, dense_2 ...
        self.n_units = n_units
+       self.in_channels = in_channels
+       self.build()
+       self._built = True

-   def build(self, inputs_shape):  # initialize the model weights here
-       shape = [inputs_shape[1], self.n_units]
+   def build(self):  # initialize the model weights here
+       shape = [self.in_channels, self.n_units]
        self.W = self._get_weights("weights", shape=tuple(shape), init=self.W_init)
        self.b = self._get_weights("biases", shape=(self.n_units, ), init=self.b_init)

@@ -48,13 +54,14 @@ The fully-connected layer is `a = f(x*W+b)`, the most simple implementation is a
            z = self.act(z)
        return z

-The full implementation is as follow, which supports both static and dynamic models and allows users to control whether to use the bias, how to initialize the weight values.
+The full implementation is as follows; it supports automatic input-shape inference as well as explicitly given input channels, and allows users to control whether to use the bias and how to initialize the weight values.

 .. code-block:: python

-  class Dense(Layer):
+
+  class Dense(Module):
    """The :class:`Dense` class is a fully connected layer.
-
+
    Parameters
    ----------
    n_units : int
      The number of units of this layer.
    act : activation function
      The activation function of this layer.
    W_init : initializer
      The initializer for the weight matrix.
    b_init : initializer or None
      The initializer for the bias vector. If None, skip biases.
    in_channels: int
      The number of channels of the previous layer.
      If None, it will be automatically detected when the layer is forwarded for the first time.
name : None or str A unique layer name. If None, a unique name will be automatically generated. + + Examples + -------- + With TensorLayer + + >>> net = tl.layers.Input([100, 50], name='input') + >>> dense = tl.layers.Dense(n_units=800, act=tl.ReLU, in_channels=50, name='dense_1') + >>> print(dense) + Dense(n_units=800, relu, in_channels='50', name='dense_1') + >>> tensor = tl.layers.Dense(n_units=800, act=tl.ReLU, name='dense_2')(net) + >>> print(tensor) + tf.Tensor([...], shape=(100, 800), dtype=float32) + + Notes + ----- + If the layer input has more than two axes, it needs to be flatten by using :class:`Flatten`. + """ - + def __init__( - self, - n_units, - act=None, - W_init=tl.initializers.truncated_normal(stddev=0.1), - b_init=tl.initializers.constant(value=0.0), - in_channels=None, # the number of units/channels of the previous layer - name=None, + self, + n_units, + act=None, + W_init=tl.initializers.truncated_normal(stddev=0.05), + b_init=tl.initializers.constant(value=0.0), + in_channels=None, + name=None, # 'dense', ): - # we feed activation function to the base layer, `None` denotes identity function - # string (e.g., relu, sigmoid) will be converted into function. - super(Dense, self).__init__(name, act=act) + + super(Dense, self).__init__(name, act=act) self.n_units = n_units self.W_init = W_init self.b_init = b_init self.in_channels = in_channels - # in dynamic model, the number of input channel is given, we initialize the weights here - if self.in_channels is not None: + if self.in_channels is not None: self.build(self.in_channels) self._built = True logging.info( "Dense %s: %d %s" % - (self.name, self.n_units, self.act.__name__ if self.act is not None else 'No Activation') + (self.name, self.n_units, self.act.__class__.__name__ if self.act is not None else 'No Activation') ) - def __repr__(self): # optional, for printing information - actstr = self.act.__name__ if self.act is not None else 'No Activation' + def __repr__(self): + actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' s = ('{classname}(n_units={n_units}, ' + actstr) if self.in_channels is not None: s += ', in_channels=\'{in_channels}\'' @@ -110,21 +132,40 @@ The full implementation is as follow, which supports both static and dynamic mod s += ')' return s.format(classname=self.__class__.__name__, **self.__dict__) - def build(self, inputs_shape): # initialize the model weights here - if self.in_channels: # if the number of input channel is given, use it + def build(self, inputs_shape): + if self.in_channels is None and len(inputs_shape) != 2: + raise AssertionError("The input dimension must be rank 2, please reshape or flatten it") + if self.in_channels: shape = [self.in_channels, self.n_units] - else: # otherwise, get it from static model + else: self.in_channels = inputs_shape[1] shape = [inputs_shape[1], self.n_units] + self.W = self._get_weights("weights", shape=tuple(shape), init=self.W_init) - if self.b_init: # if b_init is None, no bias is applied - self.b = self._get_weights("biases", shape=(self.n_units, ), init=self.b_init) - def forward(self, inputs): - z = tf.matmul(inputs, self.W) + self.b_init_flag = False if self.b_init: - z = tf.add(z, self.b) + self.b = self._get_weights("biases", shape=(self.n_units, ), init=self.b_init) + self.b_init_flag = True + self.bias_add = tl.ops.BiasAdd() + + self.act_init_flag = False if self.act: + self.act_init_flag = True + + self.matmul = tl.ops.MatMul() + + def forward(self, inputs): + if self._forward_state == False: + if 
self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + + z = self.matmul(inputs, self.W) + if self.b_init_flag: + z = self.bias_add(z, self.b) + if self.act_init_flag: z = self.act(z) return z @@ -136,37 +177,54 @@ We use Dropout as an example here: .. code-block:: python - class Dropout(Layer): - """ - The :class:`Dropout` class is a noise layer which randomly set some - activations to zero according to a keeping probability. - Parameters - ---------- - keep : float - The keeping probability. - The lower the probability it is, the more activations are set to zero. - name : None or str - A unique layer name. - """ - - def __init__(self, keep, name=None): - super(Dropout, self).__init__(name) - self.keep = keep - - self.build() - self._built = True - - logging.info("Dropout %s: keep: %f " % (self.name, self.keep)) - - def build(self, inputs_shape=None): - pass # no weights in dropout layer - - def forward(self, inputs): - if self.is_train: # this attribute is changed by Model.train() and Model.eval() described above - outputs = tf.nn.dropout(inputs, rate=1 - (self.keep), name=self.name) - else: - outputs = inputs - return outputs + class Dropout(Module): + """ + The :class:`Dropout` class is a noise layer which randomly set some + activations to zero according to a keeping probability. + + Parameters + ---------- + keep : float + The keeping probability. + The lower the probability it is, the more activations are set to zero. + seed : int or None + The seed for random dropout. + name : None or str + A unique layer name. + + Examples + -------- + >>> net = tl.layers.Input([10, 200]) + >>> net = tl.layers.Dropout(keep=0.2)(net) + + """ + + def __init__(self, keep, seed=0, name=None): #"dropout"): + super(Dropout, self).__init__(name) + self.keep = keep + self.seed = seed + + self.build() + self._built = True + + logging.info("Dropout %s: keep: %f " % (self.name, self.keep)) + + def __repr__(self): + s = ('{classname}(keep={keep}') + if self.name is not None: + s += ', name=\'{name}\'' + s += ')' + return s.format(classname=self.__class__.__name__, **self.__dict__) + + def build(self, inputs_shape=None): + self.dropout = tl.ops.Dropout(keep=self.keep, seed=self.seed) + + def forward(self, inputs): + if self.is_train: + outputs = self.dropout(inputs) + else: + outputs = inputs + return outputs Pre-trained CNN ================ @@ -176,42 +234,14 @@ Get entire CNN .. code-block:: python - import tensorflow as tf + import tensorlayer as tl import numpy as np from tensorlayer.models.imagenet_classes import class_names + from examples.model_zoo import vgg16 - vgg = tl.models.vgg16(pretrained=True) + vgg = vgg16(pretrained=True) img = tl.vis.read_image('data/tiger.jpeg') - img = tl.prepro.imresize(img, (224, 224)).astype(np.float32) / 255 + img = tl.prepro.imresize(img, (224, 224)).astype(tl.float32) / 255 output = vgg(img, is_train=False) -Get a part of CNN ------------------- - -.. code-block:: python - - # get VGG without the last layer - cnn = tl.models.vgg16(end_with='fc2_relu', mode='static').as_layer() - # add one more layer and build a new model - ni = tl.layers.Input([None, 224, 224, 3], name="inputs") - nn = cnn(ni) - nn = tl.layers.Dense(n_units=100, name='out')(nn) - model = tl.models.Model(inputs=ni, outputs=nn) - # train your own classifier (only update the last layer) - train_weights = model.get_layer('out').all_weights - -Reuse CNN ------------------- - -.. 
code-block:: python

-  # in dynamic model, we can directly use the same model
-  # in static model
-  vgg_layer = tl.models.vgg16().as_layer()
-  ni_1 = tl.layers.Input([None, 224, 224, 3])
-  ni_2 = tl.layers.Input([None, 224, 224, 3])
-  a_1 = vgg_layer(ni_1)
-  a_2 = vgg_layer(ni_2)
-  M = Model(inputs=[ni_1, ni_2], outputs=[a_1, a_2])
-
diff --git a/docs/user/get_start_model.rst b/docs/user/get_start_model.rst
index 2337a7d55..e5a1cf749 100644
--- a/docs/user/get_start_model.rst
+++ b/docs/user/get_start_model.rst
@@ -5,31 +5,26 @@ Define a model
 ===============

 TensorLayer provides two ways to define a model.
-Static model allows you to build model in a fluent way while dynamic model allows you to fully control the forward process.
+Sequential model allows you to build a model in a fluent way, while dynamic model allows you to fully control the forward process.

-Static model
+Sequential model
 ===============

 .. code-block:: python

-  import tensorflow as tf
-  from tensorlayer.layers import Input, Dropout, Dense
-  from tensorlayer.models import Model
-
-  def get_model(inputs_shape):
-      ni = Input(inputs_shape)
-      nn = Dropout(keep=0.8)(ni)
-      nn = Dense(n_units=800, act=tf.nn.relu, name="dense1")(nn) # "name" is optional
-      nn = Dropout(keep=0.8)(nn)
-      nn = Dense(n_units=800, act=tf.nn.relu)(nn)
-      nn = Dropout(keep=0.8)(nn)
-      nn = Dense(n_units=10, act=None)(nn)
-      M = Model(inputs=ni, outputs=nn, name="mlp") # "name" is optional
-      return M
-
-  MLP = get_model([None, 784])
-  MLP.eval()
-  outputs = MLP(data)
+  from tensorlayer.layers import SequentialLayer
+  from tensorlayer.layers import Dense
+  import tensorlayer as tl
+
+  def get_model():
+      layer_list = []
+      layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=784, name='Dense1'))
+      layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=800, name='Dense2'))
+      layer_list.append(Dense(n_units=10, act=tl.ReLU, in_channels=800, name='Dense3'))
+      MLP = SequentialLayer(layer_list)
+      return MLP
+

 Dynamic model
 =======================

@@ -39,15 +34,18 @@ In this case, you need to manually input the output shape of the previous layer

 .. code-block:: python

-  class CustomModel(Model):
+  import tensorlayer as tl
+  from tensorlayer.layers import Module
+  from tensorlayer.layers import Dropout, Dense
+  class CustomModel(Module):

      def __init__(self):
          super(CustomModel, self).__init__()

          self.dropout1 = Dropout(keep=0.8)
-         self.dense1 = Dense(n_units=800, act=tf.nn.relu, in_channels=784)
+         self.dense1 = Dense(n_units=800, act=tl.ReLU, in_channels=784)
          self.dropout2 = Dropout(keep=0.8)
-         self.dense2 = Dense(n_units=800, act=tf.nn.relu, in_channels=800)
+         self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800)
          self.dropout3 = Dropout(keep=0.8)
          self.dense3 = Dense(n_units=10, act=None, in_channels=800)

@@ -63,73 +61,83 @@ In this case, you need to manually input the output shape of the previous layer
          return out

      MLP = CustomModel()
-     MLP.eval()
+     MLP.set_eval()
      outputs = MLP(data, foo=True) # controls the forward here
      outputs = MLP(data, foo=False)

+Dynamic model without manually specifying the output shape
+===========================================================
+
+
+In this case, you do not need to manually pass the output shape of the previous layer to the new layer.
+
+..
code-block:: python
+
+  import tensorlayer as tl
+  from tensorlayer.layers import Module
+  from tensorlayer.layers import Dropout, Dense
+  class CustomModel(Module):
+
+      def __init__(self):
+          super(CustomModel, self).__init__()
+
+          self.dropout1 = Dropout(keep=0.8)
+          self.dense1 = Dense(n_units=800, act=tl.ReLU)
+          self.dropout2 = Dropout(keep=0.8)
+          self.dense2 = Dense(n_units=800, act=tl.ReLU)
+          self.dropout3 = Dropout(keep=0.8)
+          self.dense3 = Dense(n_units=10, act=None)
+
+      def forward(self, x, foo=False):
+          z = self.dropout1(x)
+          z = self.dense1(z)
+          z = self.dropout2(z)
+          z = self.dense2(z)
+          z = self.dropout3(z)
+          out = self.dense3(z)
+          if foo:
+              out = tl.ops.softmax(out)
+          return out
+
+  MLP = CustomModel()
+  MLP.init_build(tl.layers.Input(shape=(1, 784))) # init_build must be called to initialize the weights.
+  MLP.set_eval()
+  outputs = MLP(data, foo=True) # controls the forward here
+  outputs = MLP(data, foo=False)
+
 Switching train/test modes
 =============================

 .. code-block:: python

    # method 1: switch before forward
-   Model.train() # enable dropout, batch norm moving avg ...
-   output = Model(train_data)
+   MLP.set_train() # enable dropout, batch norm moving avg ...
+   output = MLP(train_data)
    ... # training code here
-   Model.eval() # disable dropout, batch norm moving avg ...
-   output = Model(test_data)
+   MLP.set_eval() # disable dropout, batch norm moving avg ...
+   output = MLP(test_data)
    ... # testing code here

-   # method 2: switch while forward
-   output = Model(train_data, is_train=True)
-   output = Model(test_data, is_train=False)
+   # method 2: using the packaged training modules
+   model = tl.models.Model(network=MLP, loss_fn=tl.cost.cross_entropy, optimizer=optimizer)
+   model.train(n_epoch=n_epoch, train_dataset=train_ds)

 Reuse weights
 =======================

-For static model, call the layer multiple time in model creation
-
-.. code-block:: python
-
-  # create siamese network
-
-  def create_base_network(input_shape):
-      '''Base network to be shared (eq. to feature extraction).
-      '''
-      input = Input(shape=input_shape)
-      x = Flatten()(input)
-      x = Dense(128, act=tf.nn.relu)(x)
-      x = Dropout(0.9)(x)
-      x = Dense(128, act=tf.nn.relu)(x)
-      x = Dropout(0.9)(x)
-      x = Dense(128, act=tf.nn.relu)(x)
-      return Model(input, x)
-
-
-  def get_siamese_network(input_shape):
-      """Create siamese network with shared base network as layer
-      """
-      base_layer = create_base_network(input_shape).as_layer() # convert model as layer
-
-      ni_1 = Input(input_shape)
-      ni_2 = Input(input_shape)
-      nn_1 = base_layer(ni_1) # call base_layer twice
-      nn_2 = base_layer(ni_2)
-      return Model(inputs=[ni_1, ni_2], outputs=[nn_1, nn_2])
-
-  siamese_net = get_siamese_network([None, 784])
-
 For dynamic model, call the layer multiple times in the forward function

..
code-block:: python

-  class MyModel(Model):
+  import tensorlayer as tl
+  from tensorlayer.layers import Module, Dense, Concat
+  class MyModel(Module):

      def __init__(self):
          super(MyModel, self).__init__()
-         self.dense_shared = Dense(n_units=800, act=tf.nn.relu, in_channels=784)
-         self.dense1 = Dense(n_units=10, act=tf.nn.relu, in_channels=800)
-         self.dense2 = Dense(n_units=10, act=tf.nn.relu, in_channels=800)
+         self.dense_shared = Dense(n_units=800, act=tl.ReLU, in_channels=784)
+         self.dense1 = Dense(n_units=10, act=tl.ReLU, in_channels=800)
+         self.dense2 = Dense(n_units=10, act=tl.ReLU, in_channels=800)
          self.cat = Concat()

      def forward(self, x):
@@ -158,56 +166,6 @@ Print model information
      #   (dropout_2): Dropout(keep=0.8, name='dropout_2')
      #   (dense_2): Dense(n_units=10, None, in_channels='800', name='dense_2')
      # )
-
-  import pprint
-  pprint.pprint(MLP.config)  # print the model architecture
-  # {'inputs': '_inputlayer_1_node_0',
-  #  'model_architecture': [{'args': {'dtype': tf.float32,
-  #                                   'layer_type': 'normal',
-  #                                   'name': '_inputlayer_1',
-  #                                   'shape': [None, 784]},
-  #                          'class': '_InputLayer',
-  #                          'prev_layer': None},
-  #                         {'args': {'keep': 0.8,
-  #                                   'layer_type': 'normal',
-  #                                   'name': 'dropout_1'},
-  #                          'class': 'Dropout',
-  #                          'prev_layer': ['_inputlayer_1_node_0']},
-  #                         {'args': {'act': 'relu',
-  #                                   'layer_type': 'normal',
-  #                                   'n_units': 800,
-  #                                   'name': 'dense_1'},
-  #                          'class': 'Dense',
-  #                          'prev_layer': ['dropout_1_node_0']},
-  #                         {'args': {'keep': 0.8,
-  #                                   'layer_type': 'normal',
-  #                                   'name': 'dropout_2'},
-  #                          'class': 'Dropout',
-  #                          'prev_layer': ['dense_1_node_0']},
-  #                         {'args': {'act': 'relu',
-  #                                   'layer_type': 'normal',
-  #                                   'n_units': 800,
-  #                                   'name': 'dense_2'},
-  #                          'class': 'Dense',
-  #                          'prev_layer': ['dropout_2_node_0']},
-  #                         {'args': {'keep': 0.8,
-  #                                   'layer_type': 'normal',
-  #                                   'name': 'dropout_3'},
-  #                          'class': 'Dropout',
-  #                          'prev_layer': ['dense_2_node_0']},
-  #                         {'args': {'act': None,
-  #                                   'layer_type': 'normal',
-  #                                   'n_units': 10,
-  #                                   'name': 'dense_3'},
-  #                          'class': 'Dense',
-  #                          'prev_layer': ['dropout_3_node_0']}],
-  #  'name': 'mlp',
-  #  'outputs': 'dense_3_node_0',
-  #  'version_info': {'backend': 'tensorflow',
-  #                   'backend_version': '2.0.0-alpha0',
-  #                   'save_date': None,
-  #                   'tensorlayer_version': '2.1.0',
-  #                   'training_device': 'gpu'}}

 Get specific weights
 =======================
@@ -220,10 +178,6 @@ We can get the specific weights by indexing or naming.

 .. code-block:: python

    # indexing
    all_weights = MLP.all_weights
    some_weights = MLP.all_weights[1:3]

-    # naming
-    some_weights = MLP.get_layer('dense1').all_weights
-
-
 Save and restore model
 =======================
@@ -235,15 +189,17 @@ Save weights only
 -----------------

 .. code-block:: python

-   MLP.save_weights('model_weights.h5') # by default, file will be in hdf5 format
-   MLP.load_weights('model_weights.h5')
+   MLP.save_weights('./model_weights.npz') # by default, file will be saved in npz format
+   MLP.load_weights('./model_weights.npz')

 Save model architecture and weights (optional)
 -----------------------------------------------

 .. code-block:: python

-   # When using Model.load(), there is no need to reimplement or declare the architecture of the model explicitly in code
-   MLP.save('model.h5', save_weights=True)
-   MLP = Model.load('model.h5', load_weights=True)
+   # When using the packaged training modules.
Saving and loading the model can be done as follows
+   model = tl.models.Model(network=MLP, loss_fn=tl.cost.cross_entropy, optimizer=optimizer)
+   model.train(n_epoch=n_epoch, train_dataset=train_ds)
+   model.save_weights('./model.npz', format='npz_dict')
+   model.load_weights('./model.npz', format='npz_dict')

diff --git a/docs/user/installation.rst b/docs/user/installation.rst
index 3ba467f84..06d08fba9 100644
--- a/docs/user/installation.rst
+++ b/docs/user/installation.rst
@@ -15,8 +15,9 @@ Mac OX, Linux and Windows, or ask for help on `tensorlayer@gmail.com `_.

-Install TensorFlow
+Install Backend
 =========================
+TensorLayer supports multiple deep learning backends: TensorFlow is the default, and MindSpore and PaddlePaddle are also supported.

 .. code-block:: bash

@@ -24,9 +25,24 @@ Install TensorFlow
   pip3 install tensorflow-gpu # GPU version
   pip3 install tensorflow # CPU version

+
 The installation instructions of TensorFlow are written to be very detailed on the `TensorFlow`_ website. However, there are some things that need to be considered. For example, `TensorFlow`_ officially supports GPU acceleration for Linux, Mac OX and Windows at present. For ARM processor architecture, you need to install TensorFlow from source.

+If you want to use the MindSpore backend, you should install mindspore==1.2.0.
+
+.. code-block:: bash
+
+  pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.2.1/MindSpore/gpu/ubuntu_x86/cuda-10.1/mindspore_gpu-1.2.1-cp37-cp37m-linux_x86_64.whl --trusted-host ms-release.obs.cn-north-4.myhuaweicloud.com -i https://pypi.tuna.tsinghua.edu.cn/simple
+
+
+If you want to use the PaddlePaddle backend, you should install paddlepaddle==2.0.
+
+.. code-block:: bash
+
+  python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
+
+
 Install TensorLayer
 =========================
@@ -192,7 +208,7 @@ After extracting cuDNN, you will get three folders (bin, lib, include). Then the

 Installing TensorLayer
 ------------------------

-For TensorLayer, please refer to the steps mentioned above.
+For TensorLayer, please refer to the steps mentioned above. TensorLayer 3.0 supports multiple backends; we use the TensorFlow backend by default. If you need to use other backends, you can refer to the following.

 .. code-block:: bash
pip3 install tensorflow-gpu   #GPU version (GPU version and CPU version just choose one) pip3 install tensorlayer       #Install tensorlayer - - Issue ======= diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_mindspore.py b/examples/basic_tutorials/tutorial_mnist_mlp_mindspore.py deleted file mode 100644 index 3e552d3eb..000000000 --- a/examples/basic_tutorials/tutorial_mnist_mlp_mindspore.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import numpy as np -import mindspore.nn as nn -import mindspore.ops.operations as P -from mindspore.ops import composite as C -from mindspore.common import dtype as mstype -from mindspore import context, Tensor, ParameterTuple -from mindspore.common.initializer import TruncatedNormal -from mindspore.nn import Dense, WithLossCell, SoftmaxCrossEntropyWithLogits, Momentum -import tensorlayer as tl -import mindspore as ms -import tensorflow as tf -import time - -context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU") - - -def fc_with_initialize(input_channels, out_channels): - """weight initial for fc layer""" - weight = weight_variable() - bias = weight_variable() - return nn.Dense(input_channels, out_channels, weight, bias) - - -def weight_variable(): - """weight initial""" - return TruncatedNormal(0.02) - - -class MLP(nn.Cell): - """ - Lenet network - Args: - num_class (int): Num classes. Default: 10. - - Returns: - Tensor, output tensor - - Examples: - >>> MLP(num_class=10) - """ - - def __init__(self, num_class=10): - super(MLP, self).__init__() - self.num_class = num_class - self.fc1 = fc_with_initialize(784, 800) - self.fc2 = fc_with_initialize(800, 800) - self.fc3 = fc_with_initialize(800, self.num_class) - self.relu = nn.ReLU() - - def construct(self, x): - x = self.fc1(x) - x = self.relu(x) - x = self.fc2(x) - x = self.relu(x) - x = self.fc3(x) - return x - - -class GradWrap(nn.Cell): - """ GradWrap definition """ - - def __init__(self, network): - super(GradWrap, self).__init__(auto_prefix=False) - self.network = network - self.weights = ParameterTuple(filter(lambda x: x.requires_grad, network.get_parameters())) - - def construct(self, x, label): - weights = self.weights - return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label) - - -def generator_train(): - inputs = X_train - targets = y_train - if len(inputs) != len(targets): - raise AssertionError("The length of inputs and targets should be equal") - for _input, _target in zip(inputs, targets): - yield _input, _target - - -net = MLP() -optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9) -criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) -net_with_criterion = WithLossCell(net, criterion) -train_network = GradWrap(net_with_criterion) -train_network.set_train() - -X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) -train_ds = tf.data.Dataset.from_generator(generator_train, output_types=(tf.float32, tf.int32)) -shuffle_buffer_size = 128 -batch_size = 128 -train_ds = train_ds.shuffle(shuffle_buffer_size) -train_ds = train_ds.batch(batch_size) -n_epoch = 50 - -for epoch in range(n_epoch): - start_time = time.time() - train_network.set_train() - train_loss, train_acc, n_iter = 0, 0, 0 - for X_batch, y_batch in train_ds: - X_batch = ms.Tensor(X_batch.numpy(), dtype=ms.float32) - y_batch = ms.Tensor(y_batch.numpy(), dtype=ms.int32) - output = net(X_batch) - loss_output = criterion(output, y_batch) - grads = 
train_network(X_batch, y_batch) - success = optimizer(grads) - loss = loss_output.asnumpy() - train_loss += loss - n_iter += 1 - # train_acc += np.mean((P.Equal()(P.Argmax(axis=1)(output), y_batch).asnumpy())) - print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) - print(" train loss: {}".format(train_loss / n_iter)) - # print(" train acc: {}".format(train_acc / n_iter)) - print(" triain weights ", train_network.trainable_params()[0].data) diff --git a/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py b/examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py similarity index 100% rename from examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py rename to examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py diff --git a/examples/model_zoo/__init__.py b/examples/model_zoo/__init__.py index e69de29bb..2fbe814aa 100644 --- a/examples/model_zoo/__init__.py +++ b/examples/model_zoo/__init__.py @@ -0,0 +1,6 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +from .vgg import vgg16, vgg19 +from .yolo import YOLOv4 +from .resnet import ResNet50 \ No newline at end of file diff --git a/examples/model_zoo/pretrained_resnet50.py b/examples/model_zoo/pretrained_resnet50.py index cac33eb1d..9c9761841 100644 --- a/examples/model_zoo/pretrained_resnet50.py +++ b/examples/model_zoo/pretrained_resnet50.py @@ -14,7 +14,7 @@ tl.logging.set_verbosity(tl.logging.DEBUG) # get the whole model -resnet = ResNet50(pretrained=False) +resnet = ResNet50(pretrained=True) resnet.set_eval() img1 = tl.vis.read_image('data/tiger.jpeg') diff --git a/examples/model_zoo/pretrained_yolov4.py b/examples/model_zoo/pretrained_yolov4.py index c8d390886..93c7ddc52 100644 --- a/examples/model_zoo/pretrained_yolov4.py +++ b/examples/model_zoo/pretrained_yolov4.py @@ -1,3 +1,6 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + import numpy as np import cv2 from PIL import Image diff --git a/examples/model_zoo/resnet.py b/examples/model_zoo/resnet.py index c57bef9de..2d134fea1 100644 --- a/examples/model_zoo/resnet.py +++ b/examples/model_zoo/resnet.py @@ -155,7 +155,8 @@ def make_layer(self): def ResNet50(pretrained=False, end_with='fc1000', n_classes=1000): - """Pre-trained MobileNetV1 model (static mode). Input shape [?, 224, 224, 3]. + """Pre-trained ResNet50 model. Input shape [?, 224, 224, 3]. + To use pretrained model, input should be in BGR format and subtracted from ImageNet mean [103.939, 116.779, 123.68]. 
Parameters @@ -175,14 +176,14 @@ def ResNet50(pretrained=False, end_with='fc1000', n_classes=1000): Classify ImageNet classes, see `tutorial_models_resnet50.py` TODO Modify the usage example according to the model storage location >>> # get the whole model with pretrained weights - >>> resnet = tl.models.ResNet50(pretrained=True) + >>> resnet = ResNet50(pretrained=True) >>> # use for inferencing - >>> output = resnet(img1, is_train=False) - >>> prob = tf.nn.softmax(output)[0].numpy() + >>> output = resnet(img1) + >>> prob = tl.ops.softmax(output)[0].numpy() Extract the features before fc layer - >>> resnet = tl.models.ResNet50(pretrained=True, end_with='5c') - >>> output = resnet(img1, is_train=False) + >>> resnet = ResNet50(pretrained=True, end_with='5c') + >>> output = resnet(img1) Returns ------- @@ -212,14 +213,15 @@ def restore_params(network, path='models'): f = h5py.File(os.path.join(path, 'resnet50_weights_tf_dim_ordering_tf_kernels.h5'), 'r') - for layer in network.all_layers: - if len(layer.all_weights) == 0: - continue - w_names = list(f[layer.name]) - params = [f[layer.name][n][:] for n in w_names] - # if 'bn' in layer.name: - # params = [x.reshape(1, 1, 1, -1) for x in params] - assign_weights(params, layer) - del params + # TODO Update parameter loading + # for layer in network.all_layers: + # if len(layer.all_weights) == 0: + # continue + # w_names = list(f[layer.name]) + # params = [f[layer.name][n][:] for n in w_names] + # # if 'bn' in layer.name: + # # params = [x.reshape(1, 1, 1, -1) for x in params] + # assign_weights(params, layer) + # del params f.close() diff --git a/examples/model_zoo/vgg.py b/examples/model_zoo/vgg.py index 779635d3c..c53612902 100644 --- a/examples/model_zoo/vgg.py +++ b/examples/model_zoo/vgg.py @@ -196,38 +196,17 @@ def vgg16(pretrained=False, end_with='outputs', mode='dynamic', name=None): Classify ImageNet classes with VGG16, see `tutorial_models_vgg.py `__ With TensorLayer TODO Modify the usage example according to the model storage location + >>> # get the whole model, without pre-trained VGG parameters - >>> vgg = tl.models.vgg16() + >>> vgg = vgg16() >>> # get the whole model, restore pre-trained VGG parameters - >>> vgg = tl.models.vgg16(pretrained=True) + >>> vgg = vgg16(pretrained=True) >>> # use for inferencing - >>> output = vgg(img, is_train=False) - >>> probs = tf.nn.softmax(output)[0].numpy() - - Extract features with VGG16 and Train a classifier with 100 classes - - >>> # get VGG without the last layer - >>> cnn = tl.models.vgg16(end_with='fc2_relu', mode='static').as_layer() - >>> # add one more layer and build a new model - >>> ni = Input([None, 224, 224, 3], name="inputs") - >>> nn = cnn(ni) - >>> nn = tl.layers.Dense(n_units=100, name='out')(nn) - >>> model = tl.models.Model(inputs=ni, outputs=nn) - >>> # train your own classifier (only update the last layer) - >>> train_params = model.get_layer('out').trainable_weights - - Reuse model - - >>> # in dynamic model, we can directly use the same model - >>> # in static model - >>> vgg_layer = tl.models.vgg16().as_layer() - >>> ni_1 = tl.layers.Input([None, 224, 244, 3]) - >>> ni_2 = tl.layers.Input([None, 224, 244, 3]) - >>> a_1 = vgg_layer(ni_1) - >>> a_2 = vgg_layer(ni_2) - >>> M = Model(inputs=[ni_1, ni_2], outputs=[a_1, a_2]) + >>> output = vgg(img) + >>> probs = tl.ops.softmax(output)[0].numpy() """ + if mode == 'dynamic': model = VGG(layer_type='vgg16', batch_norm=False, end_with=end_with, name=name) elif mode == 'static': @@ -259,35 +238,12 @@ def vgg19(pretrained=False, 
end_with='outputs', mode='dynamic', name=None): With TensorLayer >>> # get the whole model, without pre-trained VGG parameters - >>> vgg = tl.models.vgg19() + >>> vgg = vgg19() >>> # get the whole model, restore pre-trained VGG parameters - >>> vgg = tl.models.vgg19(pretrained=True) + >>> vgg = vgg19(pretrained=True) >>> # use for inferencing - >>> output = vgg(img, is_train=False) - >>> probs = tf.nn.softmax(output)[0].numpy() - - Extract features with VGG19 and Train a classifier with 100 classes - - >>> # get VGG without the last layer - >>> cnn = tl.models.vgg19(end_with='fc2_relu', mode='static').as_layer() - >>> # add one more layer and build a new model - >>> ni = Input([None, 224, 224, 3], name="inputs") - >>> nn = cnn(ni) - >>> nn = tl.layers.Dense(n_units=100, name='out')(nn) - >>> model = tl.models.Model(inputs=ni, outputs=nn) - >>> # train your own classifier (only update the last layer) - >>> train_params = model.get_layer('out').trainable_weights - - Reuse model - - >>> # in dynamic model, we can directly use the same model - >>> # in static model - >>> vgg_layer = tl.models.vgg19().as_layer() - >>> ni_1 = tl.layers.Input([None, 224, 244, 3]) - >>> ni_2 = tl.layers.Input([None, 224, 244, 3]) - >>> a_1 = vgg_layer(ni_1) - >>> a_2 = vgg_layer(ni_2) - >>> M = Model(inputs=[ni_1, ni_2], outputs=[a_1, a_2]) + >>> output = vgg(img) + >>> probs = tl.ops.softmax(output)[0].numpy() """ if mode == 'dynamic': diff --git a/examples/model_zoo/yolo.py b/examples/model_zoo/yolo.py index d3784b2bd..d7209dbb7 100644 --- a/examples/model_zoo/yolo.py +++ b/examples/model_zoo/yolo.py @@ -15,6 +15,10 @@ from tensorlayer.layers import Module, SequentialLayer from tensorlayer import logging +__all__ = [ + 'YOLOv4' +] + INPUT_SIZE = 416 weights_url = {'link': 'https://pan.baidu.com/s/1MC1dmEwpxsdgHO1MZ8fYRQ', 'password': 'idsz'} @@ -327,11 +331,11 @@ def YOLOv4(NUM_CLASS, pretrained=False): With TensorLayer >>> # get the whole model, without pre-trained YOLOv4 parameters - >>> yolov4 = tl.app.YOLOv4(NUM_CLASS=80, pretrained=False) + >>> yolov4 = YOLOv4(NUM_CLASS=80, pretrained=False) >>> # get the whole model, restore pre-trained YOLOv4 parameters - >>> yolov4 = tl.app.YOLOv4(NUM_CLASS=80, pretrained=True) + >>> yolov4 = YOLOv4(NUM_CLASS=80, pretrained=True) >>> # use for inferencing - >>> output = yolov4(img, is_train=False) + >>> output = yolov4(img) """ diff --git a/requirements/requirements_multiple_backends.txt b/requirements/requirements_multiple_backends.txt deleted file mode 100644 index c75ad53a2..000000000 --- a/requirements/requirements_multiple_backends.txt +++ /dev/null @@ -1,2 +0,0 @@ -mindspore==1.2.0 -paddlepaddle==2.1.0 \ No newline at end of file diff --git a/requirements/requirements_paddle.txt b/requirements/requirements_paddle.txt new file mode 100644 index 000000000..96b189ace --- /dev/null +++ b/requirements/requirements_paddle.txt @@ -0,0 +1 @@ +paddlepaddle>=2.0.2 \ No newline at end of file diff --git a/tensorlayer/backend/__init__.py b/tensorlayer/backend/__init__.py index 01e5c8376..4533f5b82 100644 --- a/tensorlayer/backend/__init__.py +++ b/tensorlayer/backend/__init__.py @@ -3,4 +3,4 @@ # load ops from .ops import * -from tensorlayer.backend import ops \ No newline at end of file +from tensorlayer.backend import ops diff --git a/tensorlayer/backend/ops/load_backend.py b/tensorlayer/backend/ops/load_backend.py index e69505215..4f4d01a25 100644 --- a/tensorlayer/backend/ops/load_backend.py +++ b/tensorlayer/backend/ops/load_backend.py @@ -33,7 +33,7 @@ else: BACKEND = 
load_dict['backend'] -# Set backend based on TL_BACKEND flag. +# Set backend based on TL_BACKEND. if 'TL_BACKEND' in os.environ: backend = os.environ['TL_BACKEND'] if backend: @@ -56,7 +56,7 @@ import mindspore.context as context import os os.environ['DEVICE_ID'] = '0' - context.set_context(mode=context.PYNATIVE_MODE,device_target='GPU'), + context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU'), # context.set_context(mode=context.GRAPH_MODE, device_target='CPU'), # enable_task_sink=True, enable_loop_sink=True) # context.set_context(mode=context.GRAPH_MODE, backend_policy='ms', diff --git a/tensorlayer/backend/ops/mindspore_backend.py b/tensorlayer/backend/ops/mindspore_backend.py index 4c4d0a2e5..d0f86c052 100644 --- a/tensorlayer/backend/ops/mindspore_backend.py +++ b/tensorlayer/backend/ops/mindspore_backend.py @@ -1047,10 +1047,13 @@ def split(value, num_or_size_splits, axis=0, num=None): """ pass + class Floor(Cell): + def __call__(self, *args, **kwargs): raise NotImplementedError + def floor(x): return NotImplementedError @@ -1143,14 +1146,14 @@ def bool_convert_to_tensor(self, x): b = np.ones(shapes) if len(shapes) == 1: for i in range(shapes - 1): - if x[i] == True: + if x[i] ==True: b[i] = 1 else: b[i] = 0 if len(shapes) == 2: for i in range(shapes[0] - 1): for j in range(shapes[1] - 1): - if x[i][j] == True: + if x[i][j] ==True: b[i][j] = 1 else: b[i][j] = 0 @@ -1246,7 +1249,9 @@ def __init__(self): def construct(self, x): return self.sign(x) + class Ceil(Cell): + def __init__(self): super(Ceil, self).__init__() self.ceil = P.Ceil() @@ -1254,6 +1259,7 @@ def __init__(self): def construct(self, x): return self.ceil(x) + def ceil(x): _ceil = P.Ceil() return _ceil(x) diff --git a/tensorlayer/backend/ops/mindspore_nn.py b/tensorlayer/backend/ops/mindspore_nn.py index a2300934c..36e0ff3fd 100644 --- a/tensorlayer/backend/ops/mindspore_nn.py +++ b/tensorlayer/backend/ops/mindspore_nn.py @@ -16,6 +16,7 @@ from mindspore._checkparam import Validator as validator from mindspore.communication.management import get_group_size, get_rank + def padding_format(padding): """ Checks that the padding format correspond format. 
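For context, `padding_format` and the per-backend `preprocess_1d/2d/3d_format` helpers normalize user-facing layout and padding strings before each backend primitive is built. A sketch of the call pattern used by the pooling and convolution classes in these files (the `'NWC'`/`'SAME'` inputs are illustrative assumptions; the accepted strings are defined by the helpers themselves):

```python
# Normalize a 1-D layout/padding pair before constructing the backend op;
# the same pattern appears in AvgPool1d across the tensorflow/paddle nn modules.
data_format, padding = preprocess_1d_format(data_format='NWC', padding='SAME')
```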
@@ -581,6 +582,7 @@ def conv2d(input, filters, strides, padding, data_format='NCHW', dilations=None) class Conv3D(Cell): + def __init__(self, strides, padding, data_format='NDHWC', dilations=None, out_channel=None, k_size=None): super(Conv3D, self).__init__() self.data_format, self.padding = preprocess_3d_format(data_format, padding) @@ -593,12 +595,10 @@ def __init__(self, strides, padding, data_format='NDHWC', dilations=None, out_ch self.ms_stride = strides[2] self.ms_dilation = dilations[2] - self.conv3d = P.Conv3D(out_channel=out_channel, - kernel_size=k_size, - pad_mode=self.padding, - stride=self.ms_stride, - dilation=self.ms_dilation, - data_format=data_format) + self.conv3d = P.Conv3D( + out_channel=out_channel, kernel_size=k_size, pad_mode=self.padding, stride=self.ms_stride, + dilation=self.ms_dilation, data_format=data_format + ) def construct(self, input, filters): outputs = self.conv3d(input, filters) @@ -705,12 +705,7 @@ def __init__(self, ksize, strides, padding, data_format=None): self.squeeze = P.Squeeze(2) _data_format = 'NCHW' - self.max_pool = P.MaxPool( - kernel_size=_ksize, - strides=_strides, - pad_mode=padding, - data_format=_data_format - ) + self.max_pool = P.MaxPool(kernel_size=_ksize, strides=_strides, pad_mode=padding, data_format=_data_format) def construct(self, inputs): if self.data_format == 'NWC': @@ -733,12 +728,7 @@ def __init__(self, ksize, strides, padding, data_format=None): if data_format == 'NCHW': _strides = (strides[2], strides[3]) - self.maxpool = P.MaxPool( - kernel_size = ksize, - strides = _strides, - pad_mode = padding, - data_format = data_format - ) + self.maxpool = P.MaxPool(kernel_size=ksize, strides=_strides, pad_mode=padding, data_format=data_format) def construct(self, inputs): outputs = self.maxpool(inputs) @@ -773,16 +763,10 @@ def max_pool(input, ksize, strides, padding, data_format=None): _strides = (strides[1], strides[2]) if data_format == 'NCHW': _strides = (strides[2], strides[3]) - outputs = P.MaxPool( - kernel_size=ksize, - strides=_strides, - pad_mode=padding, - data_format=data_format - )(input) + outputs = P.MaxPool(kernel_size=ksize, strides=_strides, pad_mode=padding, data_format=data_format)(input) return outputs - class AvgPool1d(Cell): def __init__(self, ksize, strides, padding, data_format=None): @@ -798,10 +782,9 @@ def __init__(self, ksize, strides, padding, data_format=None): _data_format = 'NCHW' self.squeeze = P.Squeeze(2) - self.avg_pool = P.AvgPool(kernel_size=self.kernel_size, - strides=self.stride, - pad_mode=self.padding, - data_format=_data_format) + self.avg_pool = P.AvgPool( + kernel_size=self.kernel_size, strides=self.stride, pad_mode=self.padding, data_format=_data_format + ) self.reduce_mean = P.ReduceMean(keep_dims=True) self.slice = P.Slice() self.expand = P.ExpandDims() @@ -1185,8 +1168,10 @@ def conv2d_transpose( class Conv3d_transpose(Cell): - def __init__(self, strides, padding, data_format='NDHWC', dilations=None, name=None, out_channel=None, k_size=None, - in_channels=None + + def __init__( + self, strides, padding, data_format='NDHWC', dilations=None, name=None, out_channel=None, k_size=None, + in_channels=None ): super(Conv3d_transpose, self).__init__() self.data_format, self.padding = preprocess_3d_format(data_format, padding) @@ -1198,21 +1183,15 @@ def __init__(self, strides, padding, data_format='NDHWC', dilations=None, name=N self.dilations = (dilations[2], dilations[3], dilations[4]) self.conv3d_transpose = P.Conv3DTranspose( - in_channel=in_channels, - out_channel=out_channel, - 
kernel_size=k_size, - mode=1, - pad_mode=padding, - stride=self.strides, - dilation=self.dilations, - data_format=self.data_format) + in_channel=in_channels, out_channel=out_channel, kernel_size=k_size, mode=1, pad_mode=padding, + stride=self.strides, dilation=self.dilations, data_format=self.data_format + ) def construct(self, input, filters): output = self.conv3d_transpose(input, filters) return output - def conv3d_transpose( input, filters, output_shape, strides, padding='SAME', data_format='NDHWC', dilations=None, name=None ): @@ -1252,18 +1231,10 @@ class BatchNorm(Cell): """Batch Normalization base class.""" @cell_attr_register - def __init__(self, - num_features, - epsilon=1e-5, - decay=0.9, - gamma=None, - beta = None, - moving_mean = None, - moving_var = None, - is_train = None, - device_num_each_group=1, - process_groups=0, - data_format='NCHW'): + def __init__( + self, num_features, epsilon=1e-5, decay=0.9, gamma=None, beta=None, moving_mean=None, moving_var=None, + is_train=None, device_num_each_group=1, process_groups=0, data_format='NCHW' + ): super(BatchNorm, self).__init__() if data_format in ["channels_last", "NHWC", "nhwc"]: data_format = "NHWC" @@ -1301,7 +1272,7 @@ def __init__(self, if self.rank_id in self.rank_list[i]: self.is_global = True if SYNC_BN_GROUP_NAME == "": - SYNC_BN_GROUP_NAME = "sync_bn_group"+ str(i) + SYNC_BN_GROUP_NAME = "sync_bn_group" + str(i) management.create_group(SYNC_BN_GROUP_NAME, self.rank_list[i]) # for SyncBatchNorm if self.process_groups != 0: @@ -1311,7 +1282,7 @@ def __init__(self, validator.check_isinstance("process_groups", self.process_groups, list) self._check_rank_ids(self.process_groups, self.rank_size) for i in range(len(self.process_groups)): - validator.check_isinstance("process_groups[" + str(i) +"]", self.process_groups[i], list) + validator.check_isinstance("process_groups[" + str(i) + "]", self.process_groups[i], list) self.group_device_num = len(self.process_groups[i]) if self.rank_id in self.process_groups[i] and self.group_device_num > 1: self.is_global = True @@ -1341,20 +1312,16 @@ def __init__(self, else: self.is_ge_backend = False - self.bn_train = P.BatchNorm(is_training=True, - epsilon=self.eps, - momentum=self.momentum, - data_format=self.format) + self.bn_train = P.BatchNorm(is_training=True, epsilon=self.eps, momentum=self.momentum, data_format=self.format) if self.is_global: - self.bn_train = inner.SyncBatchNorm(epsilon=self.eps, - momentum=self.momentum, - group=SYNC_BN_GROUP_NAME, - device_num=self.group_device_num) + self.bn_train = inner.SyncBatchNorm( + epsilon=self.eps, momentum=self.momentum, group=SYNC_BN_GROUP_NAME, device_num=self.group_device_num + ) self.bn_infer = P.BatchNorm(is_training=False, epsilon=self.eps, data_format=self.format) - data_parallel_strategy = ((1,), (1,)) - data_parallel_strategy_one = ((1,), ()) + data_parallel_strategy = ((1, ), (1, )) + data_parallel_strategy_one = ((1, ), ()) self.sub_mean = P.Sub().shard(data_parallel_strategy) self.sub_var = P.Sub().shard(data_parallel_strategy) self.mul_mean = P.Mul().shard(data_parallel_strategy_one) @@ -1364,11 +1331,13 @@ def __init__(self, def list_group(self, world_rank, group_size): if group_size > get_group_size(): - raise ValueError("group size can not be greater than local rank size, group size is {}, " - "local_rank_size is {}".format(group_size, get_group_size())) + raise ValueError( + "group size can not be greater than local rank size, group size is {}, " + "local_rank_size is {}".format(group_size, get_group_size()) + ) if 
len(world_rank) % group_size != 0: raise ValueError("please make your group size correct.") - world_rank_list = zip(*(iter(world_rank),) * group_size) + world_rank_list = zip(*(iter(world_rank), ) * group_size) group_list = [list(i) for i in world_rank_list] return group_list @@ -1388,28 +1357,21 @@ def construct(self, inputs): flag = self.use_batch_statistics if flag: - output = self.bn_train(inputs, - self.gamma, - self.beta, - self.moving_mean, - self.moving_variance)[0] + output = self.bn_train(inputs, self.gamma, self.beta, self.moving_mean, self.moving_variance)[0] if len(x_shape) == 5: output = self.reshape(output, x_shape) return output - output = self.bn_infer(inputs, - self.gamma, - self.beta, - self.moving_mean, - self.moving_variance)[0] + output = self.bn_infer(inputs, self.gamma, self.beta, self.moving_mean, self.moving_variance)[0] if len(x_shape) == 5: output = self.reshape(output, x_shape) return output def extend_repr(self): return 'num_features={}, eps={}, momentum={}, gamma={}, beta={}, moving_mean={}, moving_variance={}'.format( - self.num_features, self.eps, self.momentum, self.gamma, self.beta, self.moving_mean, self.moving_variance) + self.num_features, self.eps, self.momentum, self.gamma, self.beta, self.moving_mean, self.moving_variance + ) class GroupConv2D(Cell): @@ -1498,12 +1460,13 @@ def __init__(self, strides, padding, data_format, dilations, out_channel, k_size self.depthwise_conv = P.Conv2D( out_channel=self.in_channel * self.depth_multiplier, kernel_size=self.k_size, pad_mode=self.padding, - stride=self.ms_stride, dilation=self.ms_dilation, mode=1, group=self.in_channel , data_format=self.data_format + stride=self.ms_stride, dilation=self.ms_dilation, mode=1, group=self.in_channel, + data_format=self.data_format ) self.pointwise_conv = P.Conv2D( out_channel=self.out_channel, kernel_size=(1, 1), pad_mode=self.padding, stride=(1, 1), dilation=(1, 1), - mode=1, group=1 , data_format=self.data_format + mode=1, group=1, data_format=self.data_format ) def construct(self, x, depthwise_filters, pointwise_filters): @@ -1623,8 +1586,10 @@ def construct(self, inputs): kernel_h = h - (out_h - 1) * stride_h stride_w = w // out_w kernel_w = w - (out_w - 1) * stride_w - outputs = P.MaxPool(kernel_size=(kernel_h, kernel_w), strides=(stride_h, stride_w), - pad_mode='VALID', data_format=self.data_format)(inputs) + outputs = P.MaxPool( + kernel_size=(kernel_h, kernel_w), strides=(stride_h, stride_w), pad_mode='VALID', + data_format=self.data_format + )(inputs) return outputs diff --git a/tensorlayer/backend/ops/paddle_backend.py b/tensorlayer/backend/ops/paddle_backend.py index 69044b0ec..60c81fd63 100644 --- a/tensorlayer/backend/ops/paddle_backend.py +++ b/tensorlayer/backend/ops/paddle_backend.py @@ -816,10 +816,13 @@ def split(value, num_or_size_splits, axis=0, num=None): """ pass + class Floor(object): + def __call__(self, *args, **kwargs): raise NotImplementedError + def floor(x): raise NotImplementedError @@ -954,9 +957,11 @@ def __call__(self, x): class Ceil(object): + def __call__(self, *args, **kwargs): raise NotImplementedError + def ceil(x): raise NotImplementedError diff --git a/tensorlayer/backend/ops/paddle_nn.py b/tensorlayer/backend/ops/paddle_nn.py index 40d0f1a55..0f28537a9 100644 --- a/tensorlayer/backend/ops/paddle_nn.py +++ b/tensorlayer/backend/ops/paddle_nn.py @@ -606,6 +606,7 @@ def max_pool(input, ksize, strides, padding, data_format=None): class AvgPool1d(object): + def __init__(self, ksize, strides, padding, data_format=None): self.data_format, 
self.padding = preprocess_1d_format(data_format=data_format, padding=padding)
         self.ksize = ksize
diff --git a/tensorlayer/backend/ops/tensorflow_backend.py b/tensorlayer/backend/ops/tensorflow_backend.py
index f58632f24..9e45c569b 100644
--- a/tensorlayer/backend/ops/tensorflow_backend.py
+++ b/tensorlayer/backend/ops/tensorflow_backend.py
@@ -846,9 +846,11 @@ def split(value, num_or_size_splits, axis=0, num=None):
 
 
 class Floor(object):
+
     def __call__(self, x):
         return tf.floor(x)
 
+
 def floor(x):
     return tf.floor(x)
 
@@ -1002,6 +1004,7 @@ def __call__(self, x):
 
 
 class Ceil(object):
+
     def __call__(self, x):
         return tf.math.ceil(x)
 
diff --git a/tensorlayer/backend/ops/tensorflow_nn.py b/tensorlayer/backend/ops/tensorflow_nn.py
index e0d33c49d..62970d7d8 100644
--- a/tensorlayer/backend/ops/tensorflow_nn.py
+++ b/tensorlayer/backend/ops/tensorflow_nn.py
@@ -726,6 +726,7 @@ def max_pool(input, ksize, strides, padding, data_format=None):
 
 
 class AvgPool1d(object):
+
     def __init__(self, ksize, strides, padding, data_format=None):
         self.data_format, self.padding = preprocess_1d_format(data_format=data_format, padding=padding)
         self.ksize = ksize
diff --git a/tensorlayer/cost/paddle_cost.py b/tensorlayer/cost/paddle_cost.py
index cd66fa705..8bebe6859 100644
--- a/tensorlayer/cost/paddle_cost.py
+++ b/tensorlayer/cost/paddle_cost.py
@@ -24,6 +24,7 @@
     'maxnorm_i_regularizer',
 ]
 
+
 def cross_entropy(output, target):
     """Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions,
     it implements softmax internally. See ``tf.ops.sparse_softmax_cross_entropy_with_logits``.
@@ -102,8 +103,8 @@ def binary_cross_entropy(output, target, epsilon=1e-8):
     depth = output.shape[-1]
     target = pd.fluid.layers.one_hot(target, depth=depth)
     out = pd.fluid.layers.reduce_sum(
-        -(target * pd.log(output + epsilon) + (1. - target) * pd.log(1. - output + epsilon))
-    )
+        -(target * pd.log(output + epsilon) + (1. - target) * pd.log(1. - output + epsilon))
+    )
     return out
 
 
@@ -192,7 +193,6 @@ def absolute_difference_error(output, target, is_mean=False, axis=-1, name="abso
 
     """
 
-
     if is_mean:
         loss = pd.fluid.layers.reduce_mean(pd.fluid.layers.reduce_mean(pd.abs(output - target), axis))
     else:
@@ -600,4 +600,4 @@ def huber_loss(
 
     """
 
-    raise NotImplementedError("Not Implemented.")
\ No newline at end of file
+    raise NotImplementedError("Not Implemented.")
diff --git a/tensorlayer/cost/tensorflow_cost.py b/tensorlayer/cost/tensorflow_cost.py
index b07acad19..d819aa10f 100644
--- a/tensorlayer/cost/tensorflow_cost.py
+++ b/tensorlayer/cost/tensorflow_cost.py
@@ -236,7 +236,7 @@ def dice_coe(output, target, loss_type='jaccard', axis=(1, 2, 3), smooth=1e-5):
     Examples
     ---------
     >>> import tensorlayer as tl
-    >>> outputs = tl.act.pixel_wise_softmax(outputs)
+    >>> outputs = tl.ops.softmax(outputs)
     >>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_)
 
     References
@@ -492,20 +492,21 @@ def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=
     >>> vocab_size = 10000
     >>> embedding_size = 256
     >>> ni = tl.layers.Input([batch_size, None], dtype=tf.int64)
-    >>> net = tl.layers.Embedding(
+    >>> net_list = []
+    >>> net_list.append(tl.layers.Embedding(
     ...     vocabulary_size = vocab_size,
     ...     embedding_size = embedding_size,
-    ...     name = 'seq_embedding')(ni)
+    ...     name = 'seq_embedding'))
-    >>> net = tl.layers.RNN(
+    >>> net_list.append(tl.layers.RNN(
     ...     cell =tf.keras.layers.LSTMCell(units=embedding_size, dropout=0.1),
     ...     return_seq_2d = True,
-    ...     
name = 'dynamicrnn')(net) - >>> net = tl.layers.Dense(n_units=vocab_size, name="output")(net) - >>> model = tl.models.Model(inputs=ni, outputs=net) + ... name = 'dynamicrnn')) + >>> net_list.append(tl.layers.Dense(n_units=vocab_size, name="output")) + >>> model = tl.layers.SequentialLayer(net_list) >>> input_seqs = np.random.randint(0, 10, size=(batch_size, 10), dtype=np.int64) >>> target_seqs = np.random.randint(0, 10, size=(batch_size, 10), dtype=np.int64) >>> input_mask = np.random.randint(0, 2, size=(batch_size, 10), dtype=np.int64) - >>> outputs = model(input_seqs, is_train=True) + >>> outputs = model(input_seqs) >>> loss = tl.cost.cross_entropy_seq_with_mask(outputs, target_seqs, input_mask) """ diff --git a/tensorlayer/dataflow/paddle_data.py b/tensorlayer/dataflow/paddle_data.py index d001d56a6..04d4b3327 100644 --- a/tensorlayer/dataflow/paddle_data.py +++ b/tensorlayer/dataflow/paddle_data.py @@ -50,7 +50,7 @@ def __len__(self): class FromSlices(Dataset): - def __init__(self, datas, transform = None): + def __init__(self, datas, transform=None): self.datas = datas[0] self.labels = datas[1] self.transform = transform diff --git a/tensorlayer/files/utils.py b/tensorlayer/files/utils.py index 5c8805ac7..13505770b 100644 --- a/tensorlayer/files/utils.py +++ b/tensorlayer/files/utils.py @@ -2603,6 +2603,7 @@ def ms_variables_to_numpy(variables): results = [v.data.asnumpy() for v in var_list] return results + def pd_variables_to_numpy(variables): if not isinstance(variables, list): var_list = [variables] diff --git a/tensorlayer/initializers/__init__.py b/tensorlayer/initializers/__init__.py index 80557bd31..908d53aa1 100644 --- a/tensorlayer/initializers/__init__.py +++ b/tensorlayer/initializers/__init__.py @@ -22,4 +22,4 @@ random_uniform = RandomUniform random_normal = RandomNormal truncated_normal = TruncatedNormal -he_normal = HeNormal \ No newline at end of file +he_normal = HeNormal diff --git a/tensorlayer/initializers/mindspore_initializers.py b/tensorlayer/initializers/mindspore_initializers.py index 52e0809ed..4c0ef8450 100644 --- a/tensorlayer/initializers/mindspore_initializers.py +++ b/tensorlayer/initializers/mindspore_initializers.py @@ -62,6 +62,7 @@ def from_config(cls, config): class Zeros(Initializer): """Initializer that generates tensors initialized to 0. """ + def __init__(self): self.zero = initializer.Zero() @@ -74,6 +75,7 @@ def __call__(self, shape, dtype=tl.float32): class Ones(Initializer): """Initializer that generates tensors initialized to 1. 
""" + def __init__(self): self.one = initializer.One() diff --git a/tensorlayer/initializers/paddle_initializers.py b/tensorlayer/initializers/paddle_initializers.py index 22ffa7a55..18e69ed9e 100644 --- a/tensorlayer/initializers/paddle_initializers.py +++ b/tensorlayer/initializers/paddle_initializers.py @@ -72,8 +72,7 @@ def __init__(self, minval=-0.05, maxval=0.05, seed=0): assert minval is not None, 'low should not be None' assert maxval is not None, 'high should not be None' assert maxval >= minval, 'high should greater or equal than low' - super(RandomUniform, self).__init__( - low=minval, high=maxval, seed=seed, diag_num=0, diag_step=0, diag_val=1.0) + super(RandomUniform, self).__init__(low=minval, high=maxval, seed=seed, diag_num=0, diag_step=0, diag_val=1.0) self.minval = minval self.maxval = maxval self.seed = seed @@ -149,8 +148,7 @@ class HeNormal(MSRAInitializer): """ def __init__(self, seed=0): - super(HeNormal, self).__init__( - uniform=False, fan_in=None, seed=seed) + super(HeNormal, self).__init__(uniform=False, fan_in=None, seed=seed) self.seed = seed def get_config(self): diff --git a/tensorlayer/initializers/tensorflow_initializers.py b/tensorlayer/initializers/tensorflow_initializers.py index 8865216af..5009969b7 100644 --- a/tensorlayer/initializers/tensorflow_initializers.py +++ b/tensorlayer/initializers/tensorflow_initializers.py @@ -59,6 +59,14 @@ def from_config(cls, config): class Zeros(Initializer): """Initializer that generates tensors initialized to 0. + + Examples + -------- + + >>> import tensorlayer as tl + >>> init = tl.initializers.zeros() + >>> print(init(shape=(5, 10), dtype=tl.float32)) + """ def __call__(self, shape, dtype=tl.float32): @@ -67,6 +75,14 @@ def __call__(self, shape, dtype=tl.float32): class Ones(Initializer): """Initializer that generates tensors initialized to 1. + + Examples + -------- + + >>> import tensorlayer as tl + >>> init = tl.initializers.ones() + >>> print(init(shape=(5, 10), dtype=tl.float32)) + """ def __call__(self, shape, dtype=tl.float32): @@ -81,6 +97,13 @@ class Constant(Initializer): value : A python scalar or a numpy array. The assigned value. + Examples + -------- + + >>> import tensorlayer as tl + >>> init = tl.initializers.constant(value=10) + >>> print(init(shape=(5, 10), dtype=tl.float32)) + """ def __init__(self, value=0): @@ -105,6 +128,13 @@ class RandomUniform(Initializer): seed : A Python integer. Used to seed the random generator. + Examples + -------- + + >>> import tensorlayer as tl + >>> init = tl.initializers.random_uniform(minval=-0.05, maxval=0.05) + >>> print(init(shape=(5, 10), dtype=tl.float32)) + """ def __init__(self, minval=-0.05, maxval=0.05, seed=None): @@ -130,6 +160,16 @@ class RandomNormal(Initializer): Standard deviation of the random values to generate. seed : A Python integer. Used to seed the random generator. + + minval=-0.05, maxval=0.05 + + Examples + -------- + + >>> import tensorlayer as tl + >>> init = tl.initializers.random_normal(mean=0.0, stddev=0.05) + >>> print(init(shape=(5, 10), dtype=tl.float32)) + """ def __init__(self, mean=0.0, stddev=0.05, seed=None): @@ -161,6 +201,14 @@ class TruncatedNormal(Initializer): Standard deviation of the andom values to generate. seed : A Python integer. Used to seed the random generator. 
+ + Examples + -------- + + >>> import tensorlayer as tl + >>> init = tl.initializers.truncated_normal(mean=0.0, stddev=0.05) + >>> print(init(shape=(5, 10), dtype=tl.float32)) + """ def __init__(self, mean=0.0, stddev=0.05, seed=None): @@ -183,6 +231,13 @@ class HeNormal(Initializer): seed : A Python integer. Used to seed the random generator. + Examples + -------- + + >>> import tensorlayer as tl + >>> init = tl.initializers.he_normal() + >>> print(init(shape=(5, 10), dtype=tl.float32)) + """ def __init__(self, seed=None): diff --git a/tensorlayer/layers/activation.py b/tensorlayer/layers/activation.py index c5a0de383..f4298478d 100644 --- a/tensorlayer/layers/activation.py +++ b/tensorlayer/layers/activation.py @@ -7,16 +7,7 @@ from tensorlayer.layers.core import Module __all__ = [ - 'PRelu', - 'PRelu6', - 'PTRelu6', - 'LeakyReLU', - 'LeakyReLU6', - 'LeakyTwiceRelu6', - 'Ramp', - 'Swish', - 'HardTanh', - 'Mish' + 'PRelu', 'PRelu6', 'PTRelu6', 'LeakyReLU', 'LeakyReLU6', 'LeakyTwiceRelu6', 'Ramp', 'Swish', 'HardTanh', 'Mish' ] @@ -41,7 +32,7 @@ class PRelu(Module): Examples ----------- >>> inputs = tl.layers.Input([10, 5]) - >>> prelulayer = tl.layers.PRelu(channel_shared=True) + >>> prelulayer = tl.layers.PRelu(channel_shared=True, in_channels=5)(inputs) References ----------- @@ -141,6 +132,11 @@ class PRelu6(Module): name : None or str A unique layer name. + Examples + ----------- + >>> inputs = tl.layers.Input([10, 5]) + >>> prelulayer = tl.layers.PRelu6(channel_shared=True, in_channels=5)(inputs) + References ----------- - `Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification `__ @@ -249,6 +245,11 @@ class PTRelu6(Module): name : None or str A unique layer name. + Examples + ----------- + >>> inputs = tl.layers.Input([10, 5]) + >>> prelulayer = tl.layers.PTRelu6(channel_shared=True, in_channels=5)(inputs) + References ----------- - `Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification `__ @@ -347,6 +348,11 @@ class Ramp(Module): Tensor A ``Tensor`` in the same type as ``x``. + Examples + ----------- + >>> inputs = tl.layers.Input([10, 5]) + >>> prelulayer = tl.layers.Ramp()(inputs) + """ def __init__(self, v_min=0, v_max=1): @@ -380,7 +386,6 @@ class LeakyReLU(Module): Examples -------- - >>> import tensorlayer as tl >>> net = tl.layers.Input([10, 200]) >>> net = tl.layers.LeakyReLU(alpha=0.5)(net) @@ -429,7 +434,6 @@ class LeakyReLU6(Module): Examples -------- - >>> import tensorlayer as tl >>> net = tl.layers.Input([10, 200]) >>> net = tl.layers.LeakyReLU6(alpha=0.5)(net) @@ -487,7 +491,6 @@ class LeakyTwiceRelu6(Module): Examples -------- - >>> import tensorlayer as tl >>> net = tl.layers.Input([10, 200]) >>> net = tl.layers.LeakyTwiceRelu6(alpha_low=0.5, alpha_high=0.2)(net) @@ -535,6 +538,11 @@ class Swish(Module): name: str function name (optional). + Examples + -------- + >>> net = tl.layers.Input([10, 200]) + >>> net = tl.layers.Swish()(net) + Returns ------- Tensor @@ -563,6 +571,11 @@ class HardTanh(Module): name : str The function name (optional). + Examples + -------- + >>> net = tl.layers.Input([10, 200]) + >>> net = tl.layers.HardTanh()(net) + Returns ------- Tensor @@ -588,6 +601,11 @@ class Mish(Module): x : Tensor input. 
+    Examples
+    --------
+    >>> net = tl.layers.Input([10, 200])
+    >>> net = tl.layers.Mish()(net)
+
     Returns
     -------
     Tensor
diff --git a/tensorlayer/layers/convolution/binary_conv.py b/tensorlayer/layers/convolution/binary_conv.py
index 5fa9b541e..f949a48ce 100644
--- a/tensorlayer/layers/convolution/binary_conv.py
+++ b/tensorlayer/layers/convolution/binary_conv.py
@@ -4,7 +4,6 @@
 import tensorlayer as tl
 from tensorlayer import logging
 from tensorlayer.layers.core import Module
-from tensorlayer.backend import BACKEND
 
 __all__ = [
     'BinaryConv2d',
diff --git a/tensorlayer/layers/convolution/deformable_conv.py b/tensorlayer/layers/convolution/deformable_conv.py
index 1e8920850..9de896cd9 100644
--- a/tensorlayer/layers/convolution/deformable_conv.py
+++ b/tensorlayer/layers/convolution/deformable_conv.py
@@ -9,9 +9,11 @@
     'DeformableConv2d',
 ]
 
+
 class DeformableConv2d(Module):
     """The :class:`DeformableConv2d` class is a 2D
     `Deformable Convolutional Networks `__.
+
     Parameters
     ----------
     offset_layer : tl.Tensor
@@ -34,6 +36,7 @@ class DeformableConv2d(Module):
         The number of in channels.
     name : str
         A unique layer name.
+
     Examples
     --------
     With TensorLayer
@@ -50,6 +53,7 @@ class DeformableConv2d(Module):
     >>> deformconv2 = tl.layers.DeformableConv2d(
     ...     offset_layer=offset2, n_filter=64, filter_size=(3, 3), name='deformable2'
     ... )(deformconv1)
+
     References
     ----------
     - The deformation operation was adapted from the implementation in `here `__
@@ -57,6 +61,7 @@
     -----
     - The padding is fixed to 'SAME'.
     - The current implementation is not optimized for memory usage. Please use it carefully.
+
     """
 
     # @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
@@ -94,7 +99,6 @@ def __init__(
             )
         )
 
-
     def __repr__(self):
         actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation'
         s = (
@@ -163,7 +167,9 @@ def forward(self, inputs):
         input_deform = self._tf_batch_map_offsets(inputs, offset, grid_offset)
 
         outputs = self.conv3d(input=input_deform, filters=self.W)
-        outputs = tl.ops.reshape(tensor=outputs, shape=[outputs.get_shape()[0], self.input_h, self.input_w, self.n_filter])
+        outputs = tl.ops.reshape(
+            tensor=outputs, shape=[outputs.get_shape()[0], self.input_h, self.input_w, self.n_filter]
+        )
         if self.b_init:
             outputs = self.bias_add(outputs, self.b)
         if self.act:
@@ -294,4 +300,3 @@ def _tf_batch_map_offsets(self, inputs, offsets, grid_offset):
         mapped_vals = self._to_b_h_w_n_c(mapped_vals, [batch_size, input_h, input_w, kernel_n, channel])
 
         return mapped_vals
-
diff --git a/tensorlayer/layers/convolution/group_conv.py b/tensorlayer/layers/convolution/group_conv.py
index cbbbd473f..079961e69 100644
--- a/tensorlayer/layers/convolution/group_conv.py
+++ b/tensorlayer/layers/convolution/group_conv.py
@@ -13,6 +13,7 @@
 class GroupConv2d(Module):
     """The :class:`GroupConv2d` class is 2D grouped convolution, see `here `__.
+
     Parameters
     --------------
     n_filter : int
@@ -39,6 +40,7 @@ class GroupConv2d(Module):
         The number of in channels.
     name : None or str
         A unique layer name.
+
     Examples
     ---------
     With TensorLayer
 
     >>> net = tl.layers.Input([8, 24, 24, 32], name='input')
     >>> groupconv2d = tl.layers.GroupConv2d(
     ...     
)(net) >>> print(groupconv2d) >>> output shape : (8, 12, 12, 64) + """ def __init__( diff --git a/tensorlayer/layers/convolution/quan_conv.py b/tensorlayer/layers/convolution/quan_conv.py index 7a46c3d77..f89c64851 100644 --- a/tensorlayer/layers/convolution/quan_conv.py +++ b/tensorlayer/layers/convolution/quan_conv.py @@ -149,8 +149,9 @@ def build(self, inputs_shape): self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) self.bias_add = tl.ops.BiasAdd(data_format=self.data_format) - self.conv2d = tl.ops.Conv2D(strides=self.strides, padding=self.padding, data_format=self.data_format, - dilations=self._dilation_rate) + self.conv2d = tl.ops.Conv2D( + strides=self.strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate + ) def forward(self, inputs): if self._forward_state == False: diff --git a/tensorlayer/layers/convolution/quan_conv_bn.py b/tensorlayer/layers/convolution/quan_conv_bn.py index 335742b15..cec940d52 100644 --- a/tensorlayer/layers/convolution/quan_conv_bn.py +++ b/tensorlayer/layers/convolution/quan_conv_bn.py @@ -237,4 +237,4 @@ def _w_fold(self, w, gama, var, epsilon): return tf.compat.v1.div(tf.multiply(gama, w), tf.sqrt(var + epsilon)) def _bias_fold(self, beta, gama, mean, var, epsilon): - return tf.subtract(beta, tf.compat.v1.div(tf.multiply(gama, mean), tf.sqrt(var + epsilon))) \ No newline at end of file + return tf.subtract(beta, tf.compat.v1.div(tf.multiply(gama, mean), tf.sqrt(var + epsilon))) diff --git a/tensorlayer/layers/convolution/separable_conv.py b/tensorlayer/layers/convolution/separable_conv.py index 73390f1ea..fe721b65d 100644 --- a/tensorlayer/layers/convolution/separable_conv.py +++ b/tensorlayer/layers/convolution/separable_conv.py @@ -15,6 +15,7 @@ class SeparableConv1d(Module): """The :class:`SeparableConv1d` class is a 1D depthwise separable convolutional layer. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. + Parameters ------------ n_filter : int @@ -43,6 +44,7 @@ class SeparableConv1d(Module): The number of in channels. name : None or str A unique layer name. + Examples -------- With TensorLayer @@ -160,6 +162,7 @@ def forward(self, inputs): class SeparableConv2d(Module): """The :class:`SeparableConv2d` class is a 2D depthwise separable convolutional layer. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. + Parameters ------------ n_filter : int @@ -188,6 +191,7 @@ class SeparableConv2d(Module): The number of in channels. name : None or str A unique layer name. 
+ Examples -------- With TensorLayer @@ -195,6 +199,7 @@ class SeparableConv2d(Module): >>> separableconv2d = tl.layers.SeparableConv2d(n_filter=32, filter_size=3, strides=2, depth_multiplier = 3 , padding='SAME', act=tl.ReLU, name='separable_2d')(net) >>> print(separableconv2d) >>> output shape : (8, 24, 24, 32) + """ def __init__( @@ -308,4 +313,3 @@ def forward(self, inputs): if self.act_init_flag: outputs = self.act(outputs) return outputs - diff --git a/tensorlayer/layers/convolution/simplified_conv.py b/tensorlayer/layers/convolution/simplified_conv.py index 5af052262..a3d08f247 100644 --- a/tensorlayer/layers/convolution/simplified_conv.py +++ b/tensorlayer/layers/convolution/simplified_conv.py @@ -5,7 +5,6 @@ import tensorlayer as tl from tensorlayer import logging - __all__ = [ 'Conv1d', 'Conv2d', diff --git a/tensorlayer/layers/convolution/ternary_conv.py b/tensorlayer/layers/convolution/ternary_conv.py index b8ebfd4f4..74e96ecee 100644 --- a/tensorlayer/layers/convolution/ternary_conv.py +++ b/tensorlayer/layers/convolution/ternary_conv.py @@ -140,9 +140,9 @@ def build(self, inputs_shape): self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) self.bias_add = tl.ops.BiasAdd(data_format=self.data_format) - self.conv2d = tl.ops.Conv2D(strides=self._strides, padding=self.padding, data_format=self.data_format, - dilations=self._dilation_rate) - + self.conv2d = tl.ops.Conv2D( + strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate + ) def forward(self, inputs): if self._forward_state == False: diff --git a/tensorlayer/layers/core/common.py b/tensorlayer/layers/core/common.py index a0ace214d..839908cdf 100644 --- a/tensorlayer/layers/core/common.py +++ b/tensorlayer/layers/core/common.py @@ -37,6 +37,7 @@ def str2act(act): raise Exception("Unsupported act: {}".format(act)) return _act_dict[act] + def _save_weights(net, file_path, format=None): """Input file_path, save model weights into a file of given format. Use net.load_weights() to restore. @@ -101,6 +102,7 @@ def _save_weights(net, file_path, format=None): "Other format is not supported now." ) + def _load_weights(net, file_path, format=None, in_order=True, skip=False): """Load model weights from a given file, which should be previously saved by net.save_weights(). 
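For context around the save/restore helpers reformatted in tensorlayer/layers/core/common.py above, here is a minimal usage sketch (not part of the patch). It shows how `_save_weights` / `_load_weights` are reached through the public `Module` API; `MLP` is a hypothetical `Module` subclass used purely for illustration, and the supported formats follow the helper docstrings:

    import tensorlayer as tl

    class MLP(tl.layers.Module):  # hypothetical model, for illustration only
        def __init__(self):
            super(MLP, self).__init__()
            self.dense = tl.layers.Dense(n_units=10, in_channels=784)

        def forward(self, x):
            return self.dense(x)

    net = MLP()
    net.save_weights('./model.npz')  # dispatches to _save_weights(net, file_path, format)
    net.load_weights('./model.npz')  # dispatches to _load_weights(net, file_path, format)
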
diff --git a/tensorlayer/layers/core/core_mindspore.py b/tensorlayer/layers/core/core_mindspore.py index 4d346b47e..50872aa36 100644 --- a/tensorlayer/layers/core/core_mindspore.py +++ b/tensorlayer/layers/core/core_mindspore.py @@ -43,7 +43,8 @@ def __init__(self, name=None, act=None, *args, **kwargs): str_act = str2act(act) if act: - if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or len(act) > 10 and act[0:10] == "leaky_relu"): + if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or + len(act) > 10 and act[0:10] == "leaky_relu"): self.act = str_act elif isinstance(act, str): self.act = str_act() @@ -67,7 +68,6 @@ def __init__(self, name=None, act=None, *args, **kwargs): # Layer training state self.is_train = True - # layer forward state self._forward_state = False @@ -261,8 +261,7 @@ def __init__(self, *args): def __getitem__(self, index): if isinstance(index, slice): - return self.__class__( - OrderedDict(list(self._layers.items())[index])) + return self.__class__(OrderedDict(list(self._layers.items())[index])) index = self._valid_index(len(self), index) return list(self._layers.values())[index] @@ -312,12 +311,12 @@ def _valid_index(self, layer_num, index): if not isinstance(index, int): raise TypeError("Index {} is not int type") if not -layer_num <= index < layer_num: - raise IndexError("Index should be a number in range [{}, {}), but got {}" - .format(-layer_num, layer_num, index)) + raise IndexError( + "Index should be a number in range [{}, {}), but got {}".format(-layer_num, layer_num, index) + ) return index % layer_num def _valid_module(self, layer): if issubclass(layer.__class__, Module): return True raise TypeError('Module {} is not subclass of Module'.format(layer)) - diff --git a/tensorlayer/layers/core/core_paddle.py b/tensorlayer/layers/core/core_paddle.py index b15f77ed1..22a6021b0 100644 --- a/tensorlayer/layers/core/core_paddle.py +++ b/tensorlayer/layers/core/core_paddle.py @@ -45,7 +45,8 @@ def __init__(self, name=None, act=None, *args, **kwargs): str_act = str2act(act) if act: - if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or len(act) > 10 and act[0:10] == "leaky_relu"): + if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or + len(act) > 10 and act[0:10] == "leaky_relu"): self.act = str_act elif isinstance(act, str): self.act = str_act() @@ -177,8 +178,7 @@ def __call__(self, *inputs, **kwargs): with program_desc_tracing_guard(False): self._build_once(*inputs, **kwargs) if parallel_helper._is_data_parallel_mode(): - parallel_helper._broadcast_parameters( - self._parameters.values()) + parallel_helper._broadcast_parameters(self._parameters.values()) self._paddle_built = True outputs = self.forward(*inputs, **kwargs) @@ -200,26 +200,16 @@ def _get_weights(self, var_name, shape, init=None, trainable=True, transposed=No self.trainable = trainable return w_tmp - def create_parameter(self, - shape, - attr=None, - dtype=None, - is_bias=False, - default_initializer=None): + def create_parameter(self, shape, attr=None, dtype=None, is_bias=False, default_initializer=None): """Create parameters for this layer.""" temp_attr = copy.deepcopy(attr) if isinstance(temp_attr, six.string_types) and temp_attr == "": temp_attr = None - return self._helper.create_parameter(temp_attr, shape, dtype, is_bias, - default_initializer) + return self._helper.create_parameter(temp_attr, shape, dtype, is_bias, default_initializer) @property def all_weights(self): - ret = [ - param - for _, param in self.named_parameters( - 
include_sublayers=True) - ] + ret = [param for _, param in self.named_parameters(include_sublayers=True)] return ret @property @@ -239,4 +229,4 @@ def save_weights(self, file_path, format=None): def load_weights(self, file_path, format=None, in_order=True, skip=False): """Load model weights from a given file, which should be previously saved by self.save_weights().""" - _load_weights(net=self, file_path=file_path, format=format, in_order=in_order, skip=skip) \ No newline at end of file + _load_weights(net=self, file_path=file_path, format=format, in_order=in_order, skip=skip) diff --git a/tensorlayer/layers/core/core_tensorflow.py b/tensorlayer/layers/core/core_tensorflow.py index 8eec34ebf..01fa79394 100644 --- a/tensorlayer/layers/core/core_tensorflow.py +++ b/tensorlayer/layers/core/core_tensorflow.py @@ -75,7 +75,8 @@ def __init__(self, name=None, act=None, *args, **kwargs): str_act = str2act(act) if act: - if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or len(act) > 10 and act[0:10] == "leaky_relu"): + if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or + len(act) > 10 and act[0:10] == "leaky_relu"): self.act = str_act elif isinstance(act, str): self.act = str_act() @@ -205,7 +206,6 @@ def _set_mode_for_layers(self, is_train): if isinstance(layer, Module): layer.is_train = is_train - def set_train(self): """Set this network in training mode. After calling this method, all layers in network are in training mode, in particular, BatchNorm, Dropout, etc. @@ -591,8 +591,7 @@ def __init__(self, *args): def __getitem__(self, index): if isinstance(index, slice): - return self.__class__( - OrderedDict(list(self._layers.items())[index])) + return self.__class__(OrderedDict(list(self._layers.items())[index])) index = self._valid_index(len(self), index) return list(self._layers.values())[index] @@ -619,7 +618,6 @@ def __delitem__(self, index): def __len__(self): return len(self._layers) - def append(self, layer): if self._valid_module(layer): self._layers[str(len(self))] = layer @@ -638,11 +636,12 @@ def _valid_index(self, layer_num, index): if not isinstance(index, int): raise TypeError("Index {} is not int type") if not -layer_num <= index < layer_num: - raise IndexError("Index should be a number in range [{}, {}), but got {}" - .format(-layer_num, layer_num, index)) + raise IndexError( + "Index should be a number in range [{}, {}), but got {}".format(-layer_num, layer_num, index) + ) return index % layer_num def _valid_module(self, layer): if issubclass(layer.__class__, Module): return True - raise TypeError('Module {} is not subclass of Module'.format(layer)) \ No newline at end of file + raise TypeError('Module {} is not subclass of Module'.format(layer)) diff --git a/tensorlayer/layers/dense/base_dense.py b/tensorlayer/layers/dense/base_dense.py index a0470300b..acc3447af 100644 --- a/tensorlayer/layers/dense/base_dense.py +++ b/tensorlayer/layers/dense/base_dense.py @@ -34,10 +34,10 @@ class Dense(Module): With TensorLayer >>> net = tl.layers.Input([100, 50], name='input') - >>> dense = tl.layers.Dense(n_units=800, act=tl.ops.relu, in_channels=50, name='dense_1') + >>> dense = tl.layers.Dense(n_units=800, act=tl.ReLU, in_channels=50, name='dense_1') >>> print(dense) Dense(n_units=800, relu, in_channels='50', name='dense_1') - >>> tensor = tl.layers.Dense(n_units=800, act=tl.ops.relu, name='dense_2')(net) + >>> tensor = tl.layers.Dense(n_units=800, act=tl.ReLU, name='dense_2')(net) >>> print(tensor) tf.Tensor([...], shape=(100, 800), dtype=float32) @@ 
-47,7 +47,6 @@ class Dense(Module): """ - # @cell_attr_register def __init__( self, n_units, diff --git a/tensorlayer/layers/dense/binary_dense.py b/tensorlayer/layers/dense/binary_dense.py index 90c6e2b49..24fab5cf1 100644 --- a/tensorlayer/layers/dense/binary_dense.py +++ b/tensorlayer/layers/dense/binary_dense.py @@ -34,6 +34,14 @@ class BinaryDense(Module): name : None or str A unique layer name. + Examples + -------- + >>> net = tl.layers.Input([10, 784], name='input') + >>> net = tl.layers.BinaryDense(n_units=800, act=tl.ReLU, name='relu1')(net) + >>> output shape :(10, 800) + >>> net = tl.layers.BinaryDense(n_units=10, name='output')(net) + >>> output shape : (10, 10) + """ def __init__( @@ -90,7 +98,6 @@ def build(self, inputs_shape): self.matmul = tl.ops.MatMul() - def forward(self, inputs): if self._forward_state == False: if self._built == False: diff --git a/tensorlayer/layers/dense/dorefa_dense.py b/tensorlayer/layers/dense/dorefa_dense.py index bf35c14d4..f54a228d3 100644 --- a/tensorlayer/layers/dense/dorefa_dense.py +++ b/tensorlayer/layers/dense/dorefa_dense.py @@ -39,6 +39,14 @@ class DorefaDense(Module): name : a str A unique layer name. + Examples + -------- + >>> net = tl.layers.Input([10, 784], name='input') + >>> net = tl.layers.DorefaDense(n_units=800, act=tl.ReLU, name='relu1')(net) + >>> output shape :(10, 800) + >>> net = tl.layers.DorefaDense(n_units=10, name='output')(net) + >>> output shape :(10, 10) + """ def __init__( @@ -113,4 +121,4 @@ def forward(self, inputs): outputs = self.bias_add(outputs, self.b) if self.act: outputs = self.act(outputs) - return outputs \ No newline at end of file + return outputs diff --git a/tensorlayer/layers/dense/dropconnect.py b/tensorlayer/layers/dense/dropconnect.py index 178ea2cb7..3e0e3e2ef 100644 --- a/tensorlayer/layers/dense/dropconnect.py +++ b/tensorlayer/layers/dense/dropconnect.py @@ -38,13 +38,13 @@ class DropconnectDense(Module): Examples -------- - >>> net = tl.layers.Input([None, 784], name='input') - >>> net = tl.layers.DropconnectDense(keep=0.8, - ... n_units=800, act=tl.ReLU, name='relu1')(net) - >>> net = tl.layers.DropconnectDense(keep=0.5, - ... n_units=800, act=tl.ReLU, name='relu2')(net) - >>> net = tl.layers.DropconnectDense(keep=0.5, - ... n_units=10, name='output')(net) + >>> net = tl.layers.Input([10, 784], name='input') + >>> net = tl.layers.DropconnectDense(keep=0.8, n_units=800, act=tl.ReLU, name='relu1')(net) + >>> output shape :(10, 800) + >>> net = tl.layers.DropconnectDense(keep=0.5, n_units=800, act=tl.ReLU, name='relu2')(net) + >>> output shape :(10, 800) + >>> net = tl.layers.DropconnectDense(keep=0.5, n_units=10, name='output')(net) + >>> output shape :(10, 10) References ---------- diff --git a/tensorlayer/layers/dense/quan_dense.py b/tensorlayer/layers/dense/quan_dense.py index 460402398..a055675f9 100644 --- a/tensorlayer/layers/dense/quan_dense.py +++ b/tensorlayer/layers/dense/quan_dense.py @@ -37,6 +37,14 @@ class QuanDense(Module): name : None or str A unique layer name. 
+    Examples
+    --------
+    >>> net = tl.layers.Input([10, 784], name='input')
+    >>> net = tl.layers.QuanDense(n_units=800, act=tl.ReLU, name='relu1')(net)
+    >>> output shape :(10, 800)
+    >>> net = tl.layers.QuanDense(n_units=10, name='output')(net)
+    >>> output shape :(10, 10)
+
     """
 
     def __init__(
diff --git a/tensorlayer/layers/dense/quan_dense_bn.py b/tensorlayer/layers/dense/quan_dense_bn.py
index 3f811a2a7..0c40c7dff 100644
--- a/tensorlayer/layers/dense/quan_dense_bn.py
+++ b/tensorlayer/layers/dense/quan_dense_bn.py
@@ -1,14 +1,13 @@
 #! /usr/bin/python
 # -*- coding: utf-8 -*-
-
-
 import tensorlayer as tl
 from tensorlayer import logging
 from tensorlayer.layers.core import Module
 from tensorflow.python.training import moving_averages
-from tensorlayer.layers.utils import (quantize_active_overflow, quantize_weight_overflow,
-                                      mean_var_with_update, w_fold, bias_fold)
+from tensorlayer.layers.utils import (
+    quantize_active_overflow, quantize_weight_overflow, mean_var_with_update, w_fold, bias_fold
+)
 
 __all__ = [
     'QuanDenseWithBN',
@@ -142,7 +141,6 @@ def build(self, inputs_shape):
             "moving_variacne", shape=para_bn_shape, init=tl.initializers.constant(1.0), trainable=False
         )
 
-
     def forward(self, inputs):
         if self._forward_state == False:
             if self._built == False:
diff --git a/tensorlayer/layers/deprecated.py b/tensorlayer/layers/deprecated.py
index 548360371..bbd8d4dd0 100644
--- a/tensorlayer/layers/deprecated.py
+++ b/tensorlayer/layers/deprecated.py
@@ -415,9 +415,23 @@ def TimeDistributedLayer(*args, **kwargs):
     # raise NonExistingLayerError("TimeDistributedLayer(x1, x2, name='a') --> TimeDistributed(name='a')(x1, x2)")
     raise NonExistingLayerError("TimeDistributedLayer is removed for TF 2.0, please use eager mode instead." + __log__)
 
-__all__ += [
-    'LayerList'
-]
+
+__all__ += ['LayerList']
+
 
 def LayerList(*args, **kwargs):
-    raise NonExistingLayerError("LayerList(list)(input_data) --> SequentialLayer(list)(input_data)" + __log__)
\ No newline at end of file
+    raise NonExistingLayerError("LayerList(list)(input_data) --> SequentialLayer(list)(input_data)" + __log__)
+
+
+__all__ += ['ModelLayer']
+
+
+def ModelLayer(*args, **kwargs):
+    raise NonExistingLayerError("ModelLayer is removed for TensorLayer 3.0.")
+
+
+__all__ += ['Seq2seqLuongAttention']
+
+
+def Seq2seqLuongAttention(*args, **kwargs):
+    raise NonExistingLayerError("Seq2seqLuongAttention is removed for TensorLayer 3.0.")
diff --git a/tensorlayer/layers/dropout.py b/tensorlayer/layers/dropout.py
index 8dccda605..54c9ba5fd 100644
--- a/tensorlayer/layers/dropout.py
+++ b/tensorlayer/layers/dropout.py
@@ -25,6 +25,11 @@ class Dropout(Module):
     name : None or str
         A unique layer name.
 
+    Examples
+    --------
+    >>> net = tl.layers.Input([10, 200])
+    >>> net = tl.layers.Dropout(keep=0.2)(net)
+
     """
 
     def __init__(self, keep, seed=0, name=None):  #"dropout"):
diff --git a/tensorlayer/layers/embedding.py b/tensorlayer/layers/embedding.py
index 249f58f18..84e4b56c1 100644
--- a/tensorlayer/layers/embedding.py
+++ b/tensorlayer/layers/embedding.py
@@ -4,7 +4,6 @@
 import tensorlayer as tl
 from tensorlayer import logging
 from tensorlayer.layers.core import Module
-# from tensorlayer.layers.core import LayersConfig
 
 __all__ = ['OneHot', 'Word2vecEmbedding', 'Embedding', 'AverageEmbedding']
 
@@ -25,21 +24,19 @@ class OneHot(Module):
     axis : None or int
         The axis.
     dtype : None or TensorFlow dtype
-        The data type, None means tf.float32.
+        The data type, None means tl.float32.
     name : str
        A unique layer name.
Examples --------- - >>> import tensorflow as tf - >>> import tensorlayer as tl >>> net = tl.layers.Input([32], dtype=tl.int32) >>> onehot = tl.layers.OneHot(depth=8) >>> print(onehot) OneHot(depth=8, name='onehot') >>> tensor = tl.layers.OneHot(depth=8)(net) >>> print(tensor) - tf.Tensor([...], shape=(32, 8), dtype=float32) + Tensor([...], shape=(32, 8), dtype=float32) """ @@ -141,12 +138,11 @@ class Word2vecEmbedding(Module): -------- Word2Vec With TensorLayer (Example in `examples/text_word_embedding/tutorial_word2vec_basic.py`) - >>> import tensorflow as tf >>> import tensorlayer as tl >>> batch_size = 8 >>> embedding_size = 50 - >>> inputs = tl.layers.Input([batch_size], dtype=tf.int32) - >>> labels = tl.layers.Input([batch_size, 1], dtype=tf.int32) + >>> inputs = tl.layers.Input([batch_size], dtype=tl.int32) + >>> labels = tl.layers.Input([batch_size, 1], dtype=tl.int32) >>> emb_net = tl.layers.Word2vecEmbedding( >>> vocabulary_size=10000, >>> embedding_size=embedding_size, @@ -331,15 +327,14 @@ class Embedding(Module): Examples -------- - >>> import tensorflow as tf >>> import tensorlayer as tl - >>> input = tl.layers.Input([8, 100], dtype=tf.int32) + >>> input = tl.layers.Input([8, 100], dtype=tl.int32) >>> embed = tl.layers.Embedding(vocabulary_size=1000, embedding_size=50, name='embed') >>> print(embed) Embedding(vocabulary_size=1000, embedding_size=50) >>> tensor = embed(input) >>> print(tensor) - tf.Tensor([...], shape=(8, 100, 50), dtype=float32) + Tensor([...], shape=(8, 100, 50), dtype=float32) """ @@ -423,17 +418,16 @@ class AverageEmbedding(Module): Examples --------- - >>> import tensorflow as tf >>> import tensorlayer as tl >>> batch_size = 8 >>> length = 5 - >>> input = tl.layers.Input([batch_size, length], dtype=tf.int32) + >>> input = tl.layers.Input([batch_size, length], dtype=tl.int32) >>> avgembed = tl.layers.AverageEmbedding(vocabulary_size=1000, embedding_size=50, name='avg') >>> print(avgembed) AverageEmbedding(vocabulary_size=1000, embedding_size=50, pad_value=0) >>> tensor = avgembed(input) >>> print(tensor) - tf.Tensor([...], shape=(8, 50), dtype=float32) + Tensor([...], shape=(8, 50), dtype=float32) """ diff --git a/tensorlayer/layers/extend.py b/tensorlayer/layers/extend.py index 9f765c518..9c48da201 100644 --- a/tensorlayer/layers/extend.py +++ b/tensorlayer/layers/extend.py @@ -2,7 +2,6 @@ # -*- coding: utf-8 -*- import tensorlayer as tl - from tensorlayer import logging from tensorlayer.layers.core import Module @@ -77,6 +76,7 @@ class Tile(Module): -------- >>> x = tl.layers.Input([10, 3], name='in') >>> y = tl.layers.Tile(multiples=[2, 3])(x) + """ def __init__(self, multiples=None, name=None): #'tile'): diff --git a/tensorlayer/layers/image_resampling.py b/tensorlayer/layers/image_resampling.py index a676a34ac..f0173883c 100644 --- a/tensorlayer/layers/image_resampling.py +++ b/tensorlayer/layers/image_resampling.py @@ -37,9 +37,9 @@ class UpSampling2d(Module): --------- With TensorLayer - >>> ni = tl.layers.Input([None, 50, 50, 32], name='input') + >>> ni = tl.layers.Input([10, 50, 50, 32], name='input') >>> ni = tl.layers.UpSampling2d(scale=(2, 2))(ni) - >>> output shape : [None, 100, 100, 32] + >>> output shape : [10, 100, 100, 32] """ @@ -85,6 +85,7 @@ def forward(self, inputs): outputs = self.resize(inputs) return outputs + class DownSampling2d(Module): """The :class:`DownSampling2d` class is down-sampling 2D layer. 
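As an aside, a small sketch (not part of the patch) pairing the two resampling layers edited in this file; the shapes follow the docstring examples above:

    import tensorlayer as tl

    ni = tl.layers.Input([10, 50, 50, 32], name='input')
    up = tl.layers.UpSampling2d(scale=(2, 2))(ni)      # -> [10, 100, 100, 32]
    down = tl.layers.DownSampling2d(scale=(2, 2))(up)  # -> [10, 50, 50, 32], back to the input size

Note that `DownSampling2d` divides the spatial size by `scale`: its `build()` constructs the internal resize with `scale=[1.0 / self.scale[0], 1.0 / self.scale[1]]`, as shown in the hunk below.
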
@@ -111,21 +112,13 @@ class DownSampling2d(Module): --------- With TensorLayer - >>> ni = tl.layers.Input([None, 50, 50, 32], name='input') + >>> ni = tl.layers.Input([10, 50, 50, 32], name='input') >>> ni = tl.layers.DownSampling2d(scale=(2, 2))(ni) - >>> output shape : [None, 25, 25, 32] + >>> output shape : [10, 25, 25, 32] """ - def __init__( - self, - scale, - method='bilinear', - antialias=False, - data_format='channels_last', - name=None, - ksize=None - ): + def __init__(self, scale, method='bilinear', antialias=False, data_format='channels_last', name=None, ksize=None): super(DownSampling2d, self).__init__(name) self.method = method self.antialias = antialias @@ -153,8 +146,7 @@ def __repr__(self): def build(self, inputs_shape): scale = [1.0 / self.scale[0], 1.0 / self.scale[1]] self.resize = tl.ops.Resize( - scale=scale, method=self.method, antialias=self.antialias, data_format=self.data_format, - ksize=self.ksize + scale=scale, method=self.method, antialias=self.antialias, data_format=self.data_format, ksize=self.ksize ) def forward(self, inputs): @@ -167,4 +159,4 @@ def forward(self, inputs): """ outputs = self.resize(inputs) - return outputs \ No newline at end of file + return outputs diff --git a/tensorlayer/layers/inputs.py b/tensorlayer/layers/inputs.py index 34a877869..cbcd76f0a 100644 --- a/tensorlayer/layers/inputs.py +++ b/tensorlayer/layers/inputs.py @@ -64,6 +64,13 @@ def Input(shape, init=tl.initializers.ones(), dtype=tl.float32, name=None): name : None or str A unique layer name. + Examples + --------- + With TensorLayer + + >>> ni = tl.layers.Input([10, 50, 50, 32], name='input') + >>> output shape : [10, 50, 50, 32] + """ input_layer = _InputLayer(shape, dtype=dtype, name=name, init=init) outputs = input_layer() diff --git a/tensorlayer/layers/lambda_layers.py b/tensorlayer/layers/lambda_layers.py index 1184f2925..75f95c19b 100644 --- a/tensorlayer/layers/lambda_layers.py +++ b/tensorlayer/layers/lambda_layers.py @@ -2,7 +2,6 @@ # -*- coding: utf-8 -*- import tensorflow as tf - from tensorlayer import logging from tensorlayer.files import utils from tensorlayer.layers.core import Module @@ -54,7 +53,7 @@ class Lambda(Module): Please avoid using Model.save() / Model.load() to save / load models that contain such Lambda layer. Instead, you may use Model.save_weights() / Model.load_weights() to save / load model weights. Note: In this case, fn_weights should be a list, and then the trainable weights in this Lambda layer can be added into the weights of the whole model. - >>> a = tf.Variable(1.0) + >>> a = tl.ops.Variable(1.0) >>> def func(x): >>> return x + a >>> x = tl.layers.Input([8, 3], name='input') @@ -65,15 +64,15 @@ class Lambda(Module): This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional). 
>>> layers = [
-    >>> tf.keras.layers.Dense(10, activation=tf.nn.relu),
-    >>> tf.keras.layers.Dense(5, activation=tf.nn.sigmoid),
-    >>> tf.keras.layers.Dense(1, activation=tf.identity)
+    >>> tl.layers.Dense(10, act=tl.ReLU),
+    >>> tl.layers.Dense(5, act=tl.ReLU),
+    >>> tl.layers.Dense(1, act=tl.identity)
     >>> ]
-    >>> perceptron = tf.keras.Sequential(layers)
-    >>> # in order to compile keras model and get trainable_variables of the keras model
+    >>> perceptron = tl.layers.SequentialLayer(layers)
+    >>> # in order to build the model and get the trainable weights
     >>> _ = perceptron(np.random.random([100, 5]).astype(np.float32))
     >>>
-    >>> class CustomizeModel(tl.models.Model):
+    >>> class CustomizeModel(tl.layers.Module):
     >>> def __init__(self):
     >>> super(CustomizeModel, self).__init__()
     >>> self.dense = tl.layers.Dense(in_channels=1, n_units=5)
@@ -86,7 +85,7 @@ class Lambda(Module):
     >>>
     >>> optimizer = tl.optimizers.Adam(learning_rate=0.1)
     >>> model = CustomizeModel()
-    >>> model.train()
+    >>> model.set_train()
     >>>
     >>> for epoch in range(50):
     >>> with tf.GradientTape() as tape:
@@ -185,7 +184,6 @@ class ElementwiseLambda(Module):
     Non-parametric and with args case
     This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional).
 
-    >>> # z = mean + noise * tf.exp(std * 0.5) + foo
     >>> def func(noise, mean, std, foo=42):
     >>> return mean + noise * tf.exp(std * 0.5) + foo
     >>> noise = tl.layers.Input([100, 1])
@@ -197,7 +195,6 @@ class ElementwiseLambda(Module):
     Non-parametric and non-args case
     This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional).
 
-    >>> # z = mean + noise * tf.exp(std * 0.5)
     >>> noise = tl.layers.Input([100, 1])
     >>> mean = tl.layers.Input([100, 1])
     >>> std = tl.layers.Input([100, 1])
@@ -209,7 +206,6 @@ class ElementwiseLambda(Module):
     Please avoid using Model.save() / Model.load() to save / load models that contain such ElementwiseLambda layer. Instead, you may use Model.save_weights() / Model.load_weights() to save / load model weights.
     Note: In this case, fn_weights should be a list, and then the trainable weights in this ElementwiseLambda layer can be added into the weights of the whole model.
- >>> # z = mean + noise * tf.exp(std * 0.5) + vara >>> vara = [tf.Variable(1.0)] >>> def func(noise, mean, std): >>> return mean + noise * tf.exp(std * 0.5) + vara @@ -277,4 +273,4 @@ def forward(self, inputs, **kwargs): else: outputs = self.fn(*inputs, **kwargs) - return outputs \ No newline at end of file + return outputs diff --git a/tensorlayer/layers/merge.py b/tensorlayer/layers/merge.py index 8e41d5a97..3fc5737cc 100644 --- a/tensorlayer/layers/merge.py +++ b/tensorlayer/layers/merge.py @@ -2,7 +2,6 @@ # -*- coding: utf-8 -*- import tensorlayer as tl - from tensorlayer import logging from tensorlayer.layers.core import Module @@ -27,8 +26,8 @@ class Concat(Module): >>> class CustomModel(Module): >>> def __init__(self): >>> super(CustomModel, self).__init__(name="custom") - >>> self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu1_1') - >>> self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu2_1') + >>> self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tl.ReLU, name='relu1_1') + >>> self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tl.ReLU, name='relu2_1') >>> self.concat = tl.layers.Concat(concat_dim=1, name='concat_layer') >>> def forward(self, inputs): @@ -70,6 +69,7 @@ def forward(self, inputs): outputs = self.concat(inputs) return outputs + class Elementwise(Module): """A layer that combines multiple :class:`Layer` that have the same output shapes according to an element-wise operation. @@ -78,7 +78,7 @@ class Elementwise(Module): Parameters ---------- combine_fn : a TensorFlow element-wise combine function - e.g. AND is ``tf.minimum`` ; OR is ``tf.maximum`` ; ADD is ``tf.add`` ; MUL is ``tf.multiply`` and so on. + e.g. AND is ``tl.minimum`` ; OR is ``tl.maximum`` ; ADD is ``tl.add`` ; MUL is ``tl.multiply`` and so on. See `TensorFlow Math API `__ . If the combine function is more complicated, please consider to use :class:`ElementwiseLambda`. 
act : activation function @@ -91,9 +91,9 @@ class Elementwise(Module): >>> class CustomModel(tl.models.Model): >>> def __init__(self): >>> super(CustomModel, self).__init__(name="custom") - >>> self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu1_1') - >>> self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu2_1') - >>> self.element = tl.layers.Elementwise(combine_fn=tf.minimum, name='minimum', act=tf.identity) + >>> self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tl.ReLU, name='relu1_1') + >>> self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tl.ReLU, name='relu2_1') + >>> self.element = tl.layers.Elementwise(combine_fn=tl.minimum, name='minimum', act=tl.identity) >>> def forward(self, inputs): >>> d1 = self.dense1(inputs) @@ -139,4 +139,4 @@ def forward(self, inputs): outputs = self.combine_fn(outputs, input) if self.act: outputs = self.act(outputs) - return outputs \ No newline at end of file + return outputs diff --git a/tensorlayer/layers/normalization.py b/tensorlayer/layers/normalization.py index 1ced31579..c7146d6af 100644 --- a/tensorlayer/layers/normalization.py +++ b/tensorlayer/layers/normalization.py @@ -11,7 +11,15 @@ 'BatchNorm2d', 'BatchNorm3d', ] - +# TODO Layers that needs to be updated +# ['InstanceNorm', +# 'InstanceNorm1d', +# 'InstanceNorm2d', +# 'InstanceNorm3d', +# 'LayerNorm', +# 'GroupNorm', +# 'SwitchNorm', +# ] class BatchNorm(Module): """ @@ -52,7 +60,7 @@ class BatchNorm(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 32], name='input') >>> net = tl.layers.BatchNorm()(net) Notes @@ -106,7 +114,6 @@ def __init__( self.build(None) self._built = True - if self.decay < 0.0 or 1.0 < self.decay: raise ValueError("decay should be between 0 to 1") @@ -181,8 +188,7 @@ def forward(self, inputs): if not self.is_train: self.batchnorm = tl.ops.BatchNorm( decay=self.decay, epsilon=self.epsilon, beta=self.beta, gamma=self.gamma, moving_mean=self.moving_mean, - moving_var=self.moving_var, num_features=self.num_features, data_format=self.data_format, - is_train=False + moving_var=self.moving_var, num_features=self.num_features, data_format=self.data_format, is_train=False ) outputs = self.batchnorm(inputs=inputs) if self.act_init_flag: @@ -200,7 +206,7 @@ class BatchNorm1d(BatchNorm): With TensorLayer >>> # in static model, no need to specify num_features - >>> net = tl.layers.Input([None, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 32], name='input') >>> net = tl.layers.BatchNorm1d()(net) >>> # in dynamic model, build by specifying num_features >>> conv = tl.layers.Conv1d(32, 5, 1, in_channels=3) @@ -223,7 +229,7 @@ class BatchNorm2d(BatchNorm): With TensorLayer >>> # in static model, no need to specify num_features - >>> net = tl.layers.Input([None, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 32], name='input') >>> net = tl.layers.BatchNorm2d()(net) >>> # in dynamic model, build by specifying num_features >>> conv = tl.layers.Conv2d(32, (5, 5), (1, 1), in_channels=3) @@ -246,7 +252,7 @@ class BatchNorm3d(BatchNorm): With TensorLayer >>> # in static model, no need to specify num_features - >>> net = tl.layers.Input([None, 50, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 50, 32], name='input') >>> net = tl.layers.BatchNorm3d()(net) >>> # in dynamic model, build by specifying num_features >>> conv = tl.layers.Conv3d(32, (5, 5, 5), (1, 1), 
in_channels=3)
diff --git a/tensorlayer/layers/padding.py b/tensorlayer/layers/padding.py
index 5a21b5047..84695b713 100644
--- a/tensorlayer/layers/padding.py
+++ b/tensorlayer/layers/padding.py
@@ -30,10 +30,10 @@ class PadLayer(Module):
     --------
     With TensorLayer
 
-    >>> net = tl.layers.Input([None, 224, 224, 3], name='input')
+    >>> net = tl.layers.Input([10, 224, 224, 3], name='input')
     >>> padlayer = tl.layers.PadLayer([[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT", name='inpad')(net)
     >>> print(padlayer)
-    >>> output shape : (None, 230, 230, 3)
+    >>> output shape : (10, 230, 230, 3)
 
     """
 
@@ -88,10 +88,10 @@ class ZeroPad1d(Module):
     --------
     With TensorLayer
 
-    >>> net = tl.layers.Input([None, 100, 1], name='input')
+    >>> net = tl.layers.Input([10, 100, 1], name='input')
     >>> pad1d = tl.layers.ZeroPad1d(padding=(3, 3))(net)
     >>> print(pad1d)
-    >>> output shape : (None, 106, 1)
+    >>> output shape : (10, 106, 1)
 
     """
 
@@ -142,10 +142,10 @@ class ZeroPad2d(Module):
     --------
     With TensorLayer
 
-    >>> net = tl.layers.Input([None, 100, 100, 3], name='input')
+    >>> net = tl.layers.Input([10, 100, 100, 3], name='input')
     >>> pad2d = tl.layers.ZeroPad2d(padding=((3, 3), (4, 4)))(net)
     >>> print(pad2d)
-    >>> output shape : (None, 106, 108, 3)
+    >>> output shape : (10, 106, 108, 3)
 
     """
 
@@ -189,7 +189,8 @@ class ZeroPad3d(Module):
     padding : int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
         - If int, the same symmetric padding is applied to width and height.
         - If tuple of 2 ints, interpreted as two different symmetric padding values for height and width as ``(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)``.
-        - If tuple of 2 tuples of 2 ints, interpreted as ``((left_dim1_pad, right_dim1_pad), (left_dim2_pad, right_dim2_pad), (left_dim3_pad, right_dim3_pad))``.
+        - If tuple of 3 tuples of 2 ints, interpreted as
+            ``((left_dim1_pad, right_dim1_pad), (left_dim2_pad, right_dim2_pad), (left_dim3_pad, right_dim3_pad))``.
     name : None or str
        A unique layer name.
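To make the padding arithmetic concrete, a short sketch (not part of the patch); each (before, after) pair grows the matching dimension by before + after, consistent with the docstring example in the next hunk:

    import tensorlayer as tl

    net = tl.layers.Input([10, 100, 100, 100, 3], name='input')
    pad3d = tl.layers.ZeroPad3d(padding=((3, 3), (4, 4), (5, 5)))(net)
    # output shape: (10, 106, 108, 110, 3), i.e. 100+3+3, 100+4+4, 100+5+5; channels untouched
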
@@ -197,10 +198,10 @@ class ZeroPad3d(Module): -------- With TensorLayer - >>> net = tl.layers.Input([None, 100, 100, 100, 3], name='input') + >>> net = tl.layers.Input([10, 100, 100, 100, 3], name='input') >>> pad3d = tl.layers.ZeroPad3d(padding=((3, 3), (4, 4), (5, 5)))(net) >>> print(pad3d) - >>> output shape : (None, 106, 108, 110, 3) + >>> output shape : (10, 106, 108, 110, 3) """ diff --git a/tensorlayer/layers/pooling.py b/tensorlayer/layers/pooling.py index d454be254..c51969d73 100644 --- a/tensorlayer/layers/pooling.py +++ b/tensorlayer/layers/pooling.py @@ -55,9 +55,9 @@ class PoolLayer(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 32], name='input') >>> net = tl.layers.PoolLayer()(net) - >>> output shape : [None, 25, 25, 32] + >>> output shape : [10, 25, 25, 32] """ @@ -118,9 +118,9 @@ class MaxPool1d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 32], name='input') >>> net = tl.layers.MaxPool1d(filter_size=3, strides=2, padding='SAME', name='maxpool1d')(net) - >>> output shape : [None, 25, 32] + >>> output shape : [10, 25, 32] """ @@ -163,13 +163,15 @@ def build(self, inputs_shape=None): raise Exception("unsupported data format") self._filter_size = [self.filter_size] self._strides = [self.strides] - self.max_pool = tl.ops.MaxPool1d(ksize=self._filter_size, strides=self._strides, padding=self.padding, - data_format=self.data_format) + self.max_pool = tl.ops.MaxPool1d( + ksize=self._filter_size, strides=self._strides, padding=self.padding, data_format=self.data_format + ) def forward(self, inputs): outputs = self.max_pool(inputs) return outputs + class MeanPool1d(Module): """Mean pooling for 1D signal. 
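For the pooling shapes documented in this file, a brief sketch (not part of the patch); with padding='SAME' the output length is the ceiling of input_length / stride:

    import tensorlayer as tl

    net = tl.layers.Input([10, 50, 32], name='input')
    out = tl.layers.MaxPool1d(filter_size=3, strides=2, padding='SAME')(net)
    # output shape: [10, 25, 32], since ceil(50 / 2) = 25
    # with padding='VALID' it would be floor((50 - 3) / 2) + 1 = 24
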
@@ -190,9 +192,9 @@ class MeanPool1d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 32], name='input') >>> net = tl.layers.MeanPool1d(filter_size=3, strides=2, padding='SAME')(net) - >>> output shape : [None, 25, 32] + >>> output shape : [10, 25, 32] """ @@ -236,10 +238,9 @@ def build(self, inputs_shape=None): raise Exception("unsupported data format") self._filter_size = [self.filter_size] self._strides = [self.strides] - self.avg_pool = tl.ops.AvgPool1d(ksize=self._filter_size, - strides=self._strides, - padding=self.padding, - data_format=self.data_format) + self.avg_pool = tl.ops.AvgPool1d( + ksize=self._filter_size, strides=self._strides, padding=self.padding, data_format=self.data_format + ) def forward(self, inputs): outputs = self.avg_pool(inputs) @@ -266,9 +267,9 @@ class MaxPool2d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 32], name='input') >>> net = tl.layers.MaxPool2d(filter_size=(3, 3), strides=(2, 2), padding='SAME')(net) - >>> output shape : [None, 25, 25, 32] + >>> output shape : [10, 25, 25, 32] """ @@ -342,9 +343,9 @@ class MeanPool2d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 32], name='input') >>> net = tl.layers.MeanPool2d(filter_size=(3, 3), strides=(2, 2), padding='SAME')(net) - >>> output shape : [None, 25, 25, 32] + >>> output shape : [10, 25, 25, 32] """ @@ -422,9 +423,9 @@ class MaxPool3d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 50, 32], name='input') >>> net = tl.layers.MaxPool3d(filter_size=(3, 3, 3), strides=(2, 2, 2), padding='SAME')(net) - >>> output shape : [None, 25, 25, 25, 32] + >>> output shape : [10, 25, 25, 25, 32] """ @@ -503,9 +504,9 @@ class MeanPool3d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 50, 32], name='input') >>> net = tl.layers.MeanPool3d(filter_size=(3, 3, 3), strides=(2, 2, 2), padding='SAME')(net) - >>> output shape : [None, 25, 25, 25, 32] + >>> output shape : [10, 25, 25, 25, 32] """ @@ -572,9 +573,9 @@ class GlobalMaxPool1d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 100, 30], name='input') + >>> net = tl.layers.Input([10, 100, 30], name='input') >>> net = tl.layers.GlobalMaxPool1d()(net) - >>> output shape : [None, 30] + >>> output shape : [10, 30] """ @@ -628,9 +629,9 @@ class GlobalMeanPool1d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 100, 30], name='input') + >>> net = tl.layers.Input([10, 100, 30], name='input') >>> net = tl.layers.GlobalMeanPool1d()(net) - >>> output shape : [None, 30] + >>> output shape : [10, 30] """ @@ -683,9 +684,9 @@ class GlobalMaxPool2d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 100, 100, 30], name='input') + >>> net = tl.layers.Input([10, 100, 100, 30], name='input') >>> net = tl.layers.GlobalMaxPool2d()(net) - >>> output shape : [None, 30] + >>> output shape : [10, 30] """ @@ -738,9 +739,9 @@ class GlobalMeanPool2d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 100, 100, 30], name='input') + >>> net = tl.layers.Input([10, 100, 100, 30], name='input') >>> net = tl.layers.GlobalMeanPool2d()(net) - >>> output shape : [None, 30] + 
>>> output shape : [10, 30]
 
     """
 
@@ -794,9 +795,9 @@ class GlobalMaxPool3d(Module):
     ---------
     With TensorLayer
 
-    >>> net = tl.layers.Input([None, 100, 100, 100, 30], name='input')
+    >>> net = tl.layers.Input([10, 100, 100, 100, 30], name='input')
     >>> net = tl.layers.GlobalMaxPool3d()(net)
-    >>> output shape : [None, 30]
+    >>> output shape : [10, 30]
 
     """
 
@@ -850,9 +851,9 @@ class GlobalMeanPool3d(Module):
     ---------
     With TensorLayer
 
-    >>> net = tl.layers.Input([None, 100, 100, 100, 30], name='input')
+    >>> net = tl.layers.Input([10, 100, 100, 100, 30], name='input')
     >>> net = tl.layers.GlobalMeanPool3d()(net)
-    >>> output shape : [None, 30]
+    >>> output shape : [10, 30]
 
     """
 
@@ -906,9 +907,9 @@ class CornerPool2d(Module):
     ---------
     With TensorLayer
 
-    >>> net = tl.layers.Input([None, 32, 32, 8], name='input')
+    >>> net = tl.layers.Input([10, 32, 32, 8], name='input')
     >>> net = tl.layers.CornerPool2d(mode='TopLeft',name='cornerpool2d')(net)
-    >>> output shape : [None, 32, 32, 8]
+    >>> output shape : [10, 32, 32, 8]
 
     """
 
@@ -980,9 +981,9 @@ class AdaptiveMeanPool1d(Module):
     ---------
     With TensorLayer
 
-    >>> net = tl.layers.Input([None, 32, 3], name='input')
+    >>> net = tl.layers.Input([10, 32, 3], name='input')
     >>> net = tl.layers.AdaptiveMeanPool1d(output_size=16)(net)
-    >>> output shape : [None, 16, 3]
+    >>> output shape : [10, 16, 3]
 
     """
 
@@ -1035,9 +1036,9 @@ class AdaptiveMeanPool2d(Module):
     ---------
     With TensorLayer
 
-    >>> net = tl.layers.Input([None,32, 32, 3], name='input')
+    >>> net = tl.layers.Input([10, 32, 32, 3], name='input')
     >>> net = tl.layers.AdaptiveMeanPool2d(output_size=16)(net)
-    >>> output shape : [None,16, 16, 3]
+    >>> output shape : [10, 16, 16, 3]
 
     """
 
@@ -1093,9 +1094,9 @@ class AdaptiveMeanPool3d(Module):
     ---------
     With TensorLayer
 
-    >>> net = tl.layers.Input([None,32, 32, 32, 3], name='input')
+    >>> net = tl.layers.Input([10, 32, 32, 32, 3], name='input')
     >>> net = tl.layers.AdaptiveMeanPool3d(output_size=16)(net)
-    >>> output shape : [None, 16, 16, 16, 3]
+    >>> output shape : [10, 16, 16, 16, 3]
 
     """
 
@@ -1151,9 +1152,9 @@ class AdaptiveMaxPool1d(Module):
     ---------
     With TensorLayer
 
-    >>> net = tl.layers.Input([None, 32, 3], name='input')
+    >>> net = tl.layers.Input([10, 32, 3], name='input')
     >>> net = tl.layers.AdaptiveMaxPool1d(output_size=16)(net)
-    >>> output shape : [None, 16, 3]
+    >>> output shape : [10, 16, 3]
 
     """
 
@@ -1206,9 +1207,9 @@ class AdaptiveMaxPool2d(Module):
     ---------
     With TensorLayer
 
-    >>> net = tl.layers.Input([None, 32, 32, 3], name='input')
+    >>> net = tl.layers.Input([10, 32, 32, 3], name='input')
     >>> net = tl.layers.AdaptiveMaxPool2d(output_size=16)(net)
-    >>> output shape : [None, 16, 16, 3]
+    >>> output shape : [10, 16, 16, 3]
 
     """
 
@@ -1263,9 +1264,9 @@ class AdaptiveMaxPool3d(Module):
     ---------
     With TensorLayer
 
-    >>> net = tl.layers.Input([None,32, 32, 32, 3], name='input')
+    >>> net = tl.layers.Input([10, 32, 32, 32, 3], name='input')
     >>> net = tl.layers.AdaptiveMaxPool3d(output_size=16)(net)
-    >>> output shape : [None, 16, 16, 16, 3]
+    >>> output shape : [10, 16, 16, 16, 3]
 
     """
 
diff --git a/tensorlayer/layers/quantize.py b/tensorlayer/layers/quantize.py
index 1a64f63ad..02107c721 100644
--- a/tensorlayer/layers/quantize.py
+++ b/tensorlayer/layers/quantize.py
@@ -1,9 +1,6 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
 
-import os
-os.environ['TL_BACKEND'] = 'tensorflow'
-import tensorlayer as tl
 from tensorlayer import logging
 from tensorlayer.layers.core import Module
 from tensorlayer.layers.utils import quantize
diff --git a/tensorlayer/layers/recurrent.py b/tensorlayer/layers/recurrent.py
index 23d611a40..5434cec6e 100644
--- a/tensorlayer/layers/recurrent.py
+++ b/tensorlayer/layers/recurrent.py
@@ -2,7 +2,1265 @@
 # -*- coding: utf-8 -*-
 
 import numpy as np
+import tensorflow as tf
 import tensorlayer as tl
 from tensorlayer import logging
+from tensorlayer.decorators import deprecated_alias
 from tensorlayer.layers.core import Module
+
+# TODO: Need to update to version 3.0
+__all__ = [
+    'RNN',
+    'SimpleRNN',
+    'GRURNN',
+    'LSTMRNN',
+    'BiRNN',
+    # 'ConvRNNCell',
+    # 'BasicConvLSTMCell',
+    # 'ConvLSTM',
+    'retrieve_seq_length_op',
+    'retrieve_seq_length_op2',
+    'retrieve_seq_length_op3',
+    'target_mask_op',
+]
+
+
+class RNN(Module):
+    """
+    The :class:`RNN` class is a fixed-length recurrent layer for implementing simple RNN,
+    LSTM, GRU, etc.
+
+    Parameters
+    ----------
+    cell : TensorFlow cell function
+        An RNN cell implemented by tf.keras
+        - E.g. tf.keras.layers.SimpleRNNCell, tf.keras.layers.LSTMCell, tf.keras.layers.GRUCell
+        - Note TF2.0+, TF1.0+ and TF1.0- are different
+
+    return_last_output : boolean
+        Whether to return the last output or all outputs in a sequence.
+
+        - If True, return the last output, "Sequence input and single output"
+        - If False, return all outputs, "Synced sequence input and output"
+        - In other words, if you want to stack more RNNs on this layer, set to False
+
+        In a dynamic model, `return_last_output` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_seq_2d : boolean
+        Only consider this argument when `return_last_output` is `False`
+
+        - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it.
+        - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it.
+
+        In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_last_state : boolean
+        Whether to return the last state of the RNN cell. The state is a list of Tensor.
+        For simple RNN and GRU, last_state = [last_output]; for LSTM, last_state = [last_output, last_cell_state]
+
+        - If True, the layer will return outputs and the final state of the cell.
+        - If False, the layer will return outputs only.
+
+        In a dynamic model, `return_last_state` can be updated when it is called in customised forward().
+        By default, `True`.
+    in_channels : int
+        Optional, the number of channels of the previous layer which is normally the size of embedding.
+        If given, the layer will be built when init.
+        If None, it will be automatically detected when the layer is forwarded for the first time.
+    name : str
+        A unique layer name.
+
+    Examples
+    --------
+    For synced sequence input and output, see `PTB example `__
+
+    A simple regression model below.
+
+    >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
+    >>> rnn_out, rnn_state = tl.layers.RNN(
+    >>>     cell=tf.keras.layers.LSTMCell(units=hidden_size, dropout=0.1),
+    >>>     in_channels=embedding_size,
+    >>>     return_last_output=True, return_last_state=True, name='lstmrnn'
+    >>> )(inputs)
+    >>> outputs = tl.layers.Dense(n_units=1)(rnn_out)
+    >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_state[0], rnn_state[1]], name='rnn_model')
+    >>> # If LSTMCell is applied, the rnn_state is [h, c] where h is the hidden state and c the cell state of LSTM.
+
+    A stacked RNN model.
+
+    >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
+    >>> rnn_out1 = tl.layers.RNN(
+    >>>     cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1),
+    >>>     return_last_output=False, return_seq_2d=False, return_last_state=False
+    >>> )(inputs)
+    >>> rnn_out2 = tl.layers.RNN(
+    >>>     cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1),
+    >>>     return_last_output=True, return_last_state=False
+    >>> )(rnn_out1)
+    >>> outputs = tl.layers.Dense(n_units=1)(rnn_out2)
+    >>> rnn_model = tl.models.Model(inputs=inputs, outputs=outputs)
+
+    An example if the sequences have different lengths and contain padding.
+    Similar to the DynamicRNN in TL 1.x.
+
+    If the `sequence_length` is provided in RNN's forwarding and both `return_last_output` and `return_last_state`
+    are set as `True`, the forward function will automatically ignore the paddings. Note that if `return_last_output`
+    is set as `False`, the synced sequence outputs will still include outputs which correspond with paddings,
+    but users are free to select which slice of outputs to be used in the following procedure.
+
+    The `sequence_length` should be a list of integers which indicates the length of each sequence.
+    It is recommended to use
+    `tl.layers.retrieve_seq_length_op3 `__
+    to calculate the `sequence_length`.
+
+    >>> data = [[[1], [2], [0], [0], [0]], [[1], [2], [3], [0], [0]], [[1], [2], [6], [1], [1]]]
+    >>> data = tf.convert_to_tensor(data, dtype=tf.float32)
+    >>> class DynamicRNNExample(tl.models.Model):
+    >>>     def __init__(self):
+    >>>         super(DynamicRNNExample, self).__init__()
+    >>>         self.rnnlayer = tl.layers.RNN(
+    >>>             cell=tf.keras.layers.SimpleRNNCell(units=6, dropout=0.1), in_channels=1, return_last_output=True,
+    >>>             return_last_state=True
+    >>>         )
+    >>>     def forward(self, x):
+    >>>         z, s = self.rnnlayer(x, sequence_length=tl.layers.retrieve_seq_length_op3(x))
+    >>>         return z, s
+    >>> model = DynamicRNNExample()
+    >>> model.eval()
+    >>> output, state = model(data)
+
+
+    Notes
+    -----
+    Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see layer :class:`Reshape`.
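+
+    A minimal sketch (shapes illustrative, assuming the incoming batch was flattened) of
+    restoring the required rank-3 form with :class:`Reshape` before feeding the RNN:
+
+    >>> flat = tl.layers.Input([batch_size, num_steps * embedding_size])
+    >>> seq = tl.layers.Reshape([-1, num_steps, embedding_size])(flat)
+    >>> rnn_out = tl.layers.RNN(
+    >>>     cell=tf.keras.layers.LSTMCell(units=hidden_size), return_last_output=True, return_last_state=False
+    >>> )(seq)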
+
+    """
+
+    def __init__(
+        self,
+        cell,
+        return_last_output=False,
+        return_seq_2d=False,
+        return_last_state=True,
+        in_channels=None,
+        name=None,  # 'rnn'
+    ):
+
+        super(RNN, self).__init__(name=name)
+
+        self.cell = cell
+        self.return_last_output = return_last_output
+        self.return_seq_2d = return_seq_2d
+        self.return_last_state = return_last_state
+
+        if in_channels is not None:
+            self.build((None, None, in_channels))
+            self._built = True
+
+        logging.info("RNN %s: cell: %s, n_units: %s" % (self.name, self.cell.__class__.__name__, self.cell.units))
+
+    def __repr__(self):
+        s = ('{classname}(cell={cellname}, n_units={n_units}')
+        s += ', name=\'{name}\''
+        s += ')'
+        return s.format(
+            classname=self.__class__.__name__, cellname=self.cell.__class__.__name__, n_units=self.cell.units,
+            **self.__dict__
+        )
+
+    def build(self, inputs_shape):
+        """
+        Parameters
+        ----------
+        inputs_shape : tuple
+            the shape of the input tensor
+        """
+        # Input dimension should be rank 3 [batch_size, n_steps(max), n_features]
+        if len(inputs_shape) != 3:
+            raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]")
+
+        with tf.name_scope(self.name) as scope:
+            self.cell.build(tuple(inputs_shape))
+
+        if self._trainable_weights is None:
+            self._trainable_weights = list()
+        for var in self.cell.trainable_variables:
+            self._trainable_weights.append(var)
+
+    # @tf.function
+    def forward(self, inputs, sequence_length=None, initial_state=None, **kwargs):
+        """
+        Parameters
+        ----------
+        inputs : input tensor
+            The input of a network
+        sequence_length : None or list of integers
+            The actual length of each sequence in the batch without padding.
+            If provided, when `return_last_output` and `return_last_state` are `True`,
+            the RNN will perform in the manner of a dynamic RNN, i.e.
+            the RNN will return the actual last output / state without padding.
+        initial_state : None or list of Tensor (RNN State)
+            If None, `initial_state` is zero state.
+        **kwargs : dict
+            Some attributes can be updated during forwarding,
+            such as `return_last_output`, `return_seq_2d`, `return_last_state`.
+        """
+        if kwargs:
+            for attr in kwargs:
+                if attr in self.__dict__:
+                    setattr(self, attr, kwargs[attr])
+
+        batch_size = inputs.get_shape().as_list()[0]
+        total_steps = inputs.get_shape().as_list()[1]
+
+        # checking the type and values of sequence_length
+        if sequence_length is not None:
+            if isinstance(sequence_length, list):
+                pass
+            elif isinstance(sequence_length, tf.Tensor):
+                pass
+            elif isinstance(sequence_length, np.ndarray):
+                sequence_length = sequence_length.tolist()
+            else:
+                raise TypeError(
+                    "The argument sequence_length should be either None or a list of integers. "
+                    "Type got %s" % type(sequence_length)
+                )
+            if (len(sequence_length) != batch_size):
+                raise ValueError(
+                    "The argument sequence_length should contain %d " % batch_size +
+                    "elements indicating the initial length of each sequence, but got only %d. " % len(sequence_length)
+                )
+            for i in sequence_length:
+                if not (type(i) is int or (isinstance(i, tf.Tensor) and i.dtype.is_integer)):
+                    raise TypeError(
+                        "The argument sequence_length should be either None or a list of integers. "
+                        "One element of sequence_length has the type %s" % type(i)
+                    )
+                if i > total_steps:
+                    raise ValueError(
+                        "The actual length of a sequence should not be longer than "
+                        "that of the longest sequence (total steps) in this mini-batch. "
" + "Total steps of this mini-batch %d, " % total_steps + + "but got an actual length of a sequence %d" % i + ) + + sequence_length = tl.layers.retrieve_seq_length_op3(inputs) + + sequence_length = [i - 1 if i >= 1 else 0 for i in sequence_length] + + # set warning + # if (not self.return_last_output) and sequence_length is not None: + # warnings.warn( + # 'return_last_output is set as %s ' % self.return_last_output + + # 'When sequence_length is provided, it is recommended to set as True. ' + + # 'Otherwise, padding will be considered while RNN is forwarding.' + # ) + + # return the last output, iterating each seq including padding ones. No need to store output during each + # time step. + if self.return_last_output and sequence_length is None: + outputs = [-1] + else: + outputs = list() + + # initialize the states if provided + states = initial_state if initial_state is not None else self.cell.get_initial_state(inputs) + if not isinstance(states, list): + states = [states] + + stored_states = list() + + # initialize the cell + self.cell.reset_dropout_mask() + self.cell.reset_recurrent_dropout_mask() + + # recurrent computation + # FIXME: if sequence_length is provided (dynamic rnn), only iterate max(sequence_length) times. + for time_step in range(total_steps): + + cell_output, states = self.cell.call(inputs[:, time_step, :], states, training=self.is_train) + stored_states.append(states) + + if self.return_last_output and sequence_length is None: + outputs[-1] = cell_output + else: + outputs.append(cell_output) + + # prepare to return results + if self.return_last_output and sequence_length is None: + outputs = outputs[-1] + + elif self.return_last_output and sequence_length is not None: + outputs = tf.convert_to_tensor(outputs) + outputs = tf.gather(outputs, sequence_length, axis=0) + + outputs_without_padding = [] + for i in range(batch_size): + outputs_without_padding.append(outputs[i][i][:]) + outputs = tf.convert_to_tensor(outputs_without_padding) + else: + if self.return_seq_2d: + # PTB tutorial: stack dense layer after that, or compute the cost from the output + # 2D Tensor [batch_size * n_steps, n_hidden] + outputs = tf.reshape(tf.concat(outputs, 1), [-1, self.cell.units]) + else: + # : stack more RNN layer after that + # 3D Tensor [batch_size, n_steps, n_hidden] + outputs = tf.reshape(tf.concat(outputs, 1), [-1, total_steps, self.cell.units]) + + if self.return_last_state and sequence_length is None: + return outputs, states + elif self.return_last_state and sequence_length is not None: + + stored_states = tf.convert_to_tensor(stored_states) + stored_states = tf.gather(stored_states, sequence_length, axis=0) + + states = [] + for i in range(stored_states.shape[1]): + states.append(tf.convert_to_tensor([stored_states[b, i, b, :] for b in range(batch_size)])) + + return outputs, states + else: + return outputs + + +class SimpleRNN(RNN): + """ + The :class:`SimpleRNN` class is a fixed length recurrent layer for implementing simple RNN. + + Parameters + ---------- + units: int + Positive integer, the dimension of hidden space. + return_last_output : boolean + Whether return last output or all outputs in a sequence. + - If True, return the last output, "Sequence input and single output" + - If False, return all outputs, "Synced sequence input and output" + - In other word, if you want to stack more RNNs on this layer, set to False + + In a dynamic model, `return_last_output` can be updated when it is called in customised forward(). + By default, `False`. 
+    return_seq_2d : boolean
+        Only consider this argument when `return_last_output` is `False`
+
+        - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it.
+        - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it.
+
+        In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_last_state : boolean
+        Whether to return the last state of the RNN cell. The state is a list of Tensor.
+        For simple RNN, last_state = [last_output]
+
+        - If True, the layer will return outputs and the final state of the cell.
+        - If False, the layer will return outputs only.
+
+        In a dynamic model, `return_last_state` can be updated when it is called in customised forward().
+        By default, `True`.
+    in_channels : int
+        Optional, the number of channels of the previous layer which is normally the size of embedding.
+        If given, the layer will be built when init.
+        If None, it will be automatically detected when the layer is forwarded for the first time.
+    name : str
+        A unique layer name.
+    `**kwargs`:
+        Advanced arguments to configure the simple RNN cell.
+        Please check tf.keras.layers.SimpleRNNCell.
+
+    Examples
+    --------
+
+    A simple regression model below.
+
+    >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
+    >>> rnn_out, rnn_state = tl.layers.SimpleRNN(
+    >>>     units=hidden_size, dropout=0.1,  # both units and dropout are used to configure the simple rnn cell.
+    >>>     in_channels=embedding_size,
+    >>>     return_last_output=True, return_last_state=True, name='simplernn'
+    >>> )(inputs)
+    >>> outputs = tl.layers.Dense(n_units=1)(rnn_out)
+    >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_state[0]], name='rnn_model')
+
+    Notes
+    -----
+    Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see layer :class:`Reshape`.
+
+    """
+
+    def __init__(
+        self,
+        units,
+        return_last_output=False,
+        return_seq_2d=False,
+        return_last_state=True,
+        in_channels=None,
+        name=None,  # 'simplernn'
+        **kwargs
+    ):
+        super(SimpleRNN, self).__init__(
+            cell=tf.keras.layers.SimpleRNNCell(units=units, **kwargs), return_last_output=return_last_output,
+            return_seq_2d=return_seq_2d, return_last_state=return_last_state, in_channels=in_channels, name=name
+        )
+
+
+class GRURNN(RNN):
+    """
+    The :class:`GRURNN` class is a fixed-length recurrent layer for implementing RNN with GRU cell.
+
+    Parameters
+    ----------
+    units : int
+        Positive integer, the dimension of hidden space.
+    return_last_output : boolean
+        Whether to return the last output or all outputs in a sequence.
+
+        - If True, return the last output, "Sequence input and single output"
+        - If False, return all outputs, "Synced sequence input and output"
+        - In other words, if you want to stack more RNNs on this layer, set to False
+
+        In a dynamic model, `return_last_output` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_seq_2d : boolean
+        Only consider this argument when `return_last_output` is `False`
+
+        - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it.
+        - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it.
+
+        In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_last_state : boolean
+        Whether to return the last state of the RNN cell. The state is a list of Tensor.
+        For GRU, last_state = [last_output]
+
+        - If True, the layer will return outputs and the final state of the cell.
+        - If False, the layer will return outputs only.
+
+        In a dynamic model, `return_last_state` can be updated when it is called in customised forward().
+        By default, `True`.
+    in_channels : int
+        Optional, the number of channels of the previous layer which is normally the size of embedding.
+        If given, the layer will be built when init.
+        If None, it will be automatically detected when the layer is forwarded for the first time.
+    name : str
+        A unique layer name.
+    `**kwargs`:
+        Advanced arguments to configure the GRU cell.
+        Please check tf.keras.layers.GRUCell.
+
+    Examples
+    --------
+
+    A simple regression model below.
+
+    >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
+    >>> rnn_out, rnn_state = tl.layers.GRURNN(
+    >>>     units=hidden_size, dropout=0.1,  # both units and dropout are used to configure the GRU cell.
+    >>>     in_channels=embedding_size,
+    >>>     return_last_output=True, return_last_state=True, name='grurnn'
+    >>> )(inputs)
+    >>> outputs = tl.layers.Dense(n_units=1)(rnn_out)
+    >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_state[0]], name='rnn_model')
+
+    Notes
+    -----
+    Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see layer :class:`Reshape`.
+
+    """
+
+    def __init__(
+        self,
+        units,
+        return_last_output=False,
+        return_seq_2d=False,
+        return_last_state=True,
+        in_channels=None,
+        name=None,  # 'grurnn'
+        **kwargs
+    ):
+        super(GRURNN, self).__init__(
+            cell=tf.keras.layers.GRUCell(units=units, **kwargs), return_last_output=return_last_output,
+            return_seq_2d=return_seq_2d, return_last_state=return_last_state, in_channels=in_channels, name=name
+        )
+
+
+class LSTMRNN(RNN):
+    """
+    The :class:`LSTMRNN` class is a fixed-length recurrent layer for implementing RNN with LSTM cell.
+
+    Parameters
+    ----------
+    units : int
+        Positive integer, the dimension of hidden space.
+    return_last_output : boolean
+        Whether to return the last output or all outputs in a sequence.
+
+        - If True, return the last output, "Sequence input and single output"
+        - If False, return all outputs, "Synced sequence input and output"
+        - In other words, if you want to stack more RNNs on this layer, set to False
+
+        In a dynamic model, `return_last_output` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_seq_2d : boolean
+        Only consider this argument when `return_last_output` is `False`
+
+        - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it.
+        - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it.
+
+        In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_last_state : boolean
+        Whether to return the last state of the RNN cell. The state is a list of Tensor.
+        For LSTM, last_state = [last_output, last_cell_state]
+
+        - If True, the layer will return outputs and the final state of the cell.
+        - If False, the layer will return outputs only.
+
+        In a dynamic model, `return_last_state` can be updated when it is called in customised forward().
+        By default, `True`.
+    in_channels : int
+        Optional, the number of channels of the previous layer which is normally the size of embedding.
+        If given, the layer will be built when init.
+        If None, it will be automatically detected when the layer is forwarded for the first time.
+    name : str
+        A unique layer name.
+    `**kwargs`:
+        Advanced arguments to configure the LSTM cell.
+        Please check tf.keras.layers.LSTMCell.
+
+    Examples
+    --------
+
+    A simple regression model below.
+
+    >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
+    >>> rnn_out, rnn_state = tl.layers.LSTMRNN(
+    >>>     units=hidden_size, dropout=0.1,  # both units and dropout are used to configure the LSTM cell.
+    >>>     in_channels=embedding_size,
+    >>>     return_last_output=True, return_last_state=True, name='lstmrnn'
+    >>> )(inputs)
+    >>> outputs = tl.layers.Dense(n_units=1)(rnn_out)
+    >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_state[0]], name='rnn_model')
+
+    Notes
+    -----
+    Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see layer :class:`Reshape`.
+
+    """
+
+    def __init__(
+        self,
+        units,
+        return_last_output=False,
+        return_seq_2d=False,
+        return_last_state=True,
+        in_channels=None,
+        name=None,  # 'lstmrnn'
+        **kwargs
+    ):
+        super(LSTMRNN, self).__init__(
+            cell=tf.keras.layers.LSTMCell(units=units, **kwargs), return_last_output=return_last_output,
+            return_seq_2d=return_seq_2d, return_last_state=return_last_state, in_channels=in_channels, name=name
+        )
+
+
+class BiRNN(Module):
+    """
+    The :class:`BiRNN` class is a fixed-length bidirectional recurrent layer.
+
+    Parameters
+    ----------
+    fw_cell : TensorFlow cell function for the forward direction
+        An RNN cell implemented by tf.keras, e.g. tf.keras.layers.SimpleRNNCell, tf.keras.layers.LSTMCell, tf.keras.layers.GRUCell.
+        Note TF2.0+, TF1.0+ and TF1.0- are different
+    bw_cell : TensorFlow cell function for the backward direction, similar to `fw_cell`
+    return_seq_2d : boolean
+        If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it.
+        If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it.
+        In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward().
+        By default, `False`.
+    return_last_state : boolean
+        Whether to return the last state of the two cells. The state is a list of Tensor.
+
+        - If True, the layer will return outputs, the final state of `fw_cell` and the final state of `bw_cell`.
+        - If False, the layer will return outputs only.
+
+        In a dynamic model, `return_last_state` can be updated when it is called in customised forward().
+        By default, `False`.
+    in_channels : int
+        Optional, the number of channels of the previous layer which is normally the size of embedding.
+        If given, the layer will be built when init.
+        If None, it will be automatically detected when the layer is forwarded for the first time.
+    name : str
+        A unique layer name.
+
+    Examples
+    --------
+    A simple regression model below.
+
+    >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
+    >>> # the fw_cell and bw_cell can be different
+    >>> rnnlayer = tl.layers.BiRNN(
+    >>>     fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1),
+    >>>     bw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size + 1, dropout=0.1),
+    >>>     return_seq_2d=True, return_last_state=True
+    >>> )
+    >>> # if return_last_state=True, the final state of the two cells will be returned together with the outputs
+    >>> # if return_last_state=False, only the outputs will be returned
+    >>> rnn_out, rnn_fw_state, rnn_bw_state = rnnlayer(inputs)
+    >>> # if the BiRNN is followed by a Dense, return_seq_2d should be True.
+    >>> # if the BiRNN is followed by other RNN, return_seq_2d can be False.
+    >>> dense = tl.layers.Dense(n_units=1)(rnn_out)
+    >>> outputs = tl.layers.Reshape([-1, num_steps])(dense)
+    >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_out, rnn_fw_state[0], rnn_bw_state[0]])
+
+    A stacked BiRNN model.
+
+    >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
+    >>> rnn_out1 = tl.layers.BiRNN(
+    >>>     fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1),
+    >>>     bw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size + 1, dropout=0.1),
+    >>>     return_seq_2d=False, return_last_state=False
+    >>> )(inputs)
+    >>> rnn_out2 = tl.layers.BiRNN(
+    >>>     fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1),
+    >>>     bw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size + 1, dropout=0.1),
+    >>>     return_seq_2d=True, return_last_state=False
+    >>> )(rnn_out1)
+    >>> dense = tl.layers.Dense(n_units=1)(rnn_out2)
+    >>> outputs = tl.layers.Reshape([-1, num_steps])(dense)
+    >>> rnn_model = tl.models.Model(inputs=inputs, outputs=outputs)
+
+    Notes
+    -----
+    Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see layer :class:`Reshape`.
+
+    """
+
+    def __init__(
+        self,
+        fw_cell,
+        bw_cell,
+        return_seq_2d=False,
+        return_last_state=False,
+        in_channels=None,
+        name=None,  # 'birnn'
+    ):
+        super(BiRNN, self).__init__(name)
+
+        self.fw_cell = fw_cell
+        self.bw_cell = bw_cell
+        self.return_seq_2d = return_seq_2d
+        self.return_last_state = return_last_state
+
+        if in_channels is not None:
+            self.build((None, None, in_channels))
+            self._built = True
+
+        logging.info(
+            "BiRNN %s: fw_cell: %s, fw_n_units: %s, bw_cell: %s, bw_n_units: %s" % (
+                self.name, self.fw_cell.__class__.__name__, self.fw_cell.units, self.bw_cell.__class__.__name__,
+                self.bw_cell.units
+            )
+        )
+
+    def __repr__(self):
+        s = (
+            '{classname}(fw_cell={fw_cellname}, fw_n_units={fw_n_units}'
+            ', bw_cell={bw_cellname}, bw_n_units={bw_n_units}'
+        )
+        s += ', name=\'{name}\''
+        s += ')'
+        return s.format(
+            classname=self.__class__.__name__, fw_cellname=self.fw_cell.__class__.__name__,
+            fw_n_units=self.fw_cell.units, bw_cellname=self.bw_cell.__class__.__name__, bw_n_units=self.bw_cell.units,
+            **self.__dict__
+        )
+
+    def build(self, inputs_shape):
+        """
+        Parameters
+        ----------
+        inputs_shape : tuple
+            the shape of the input tensor
+        """
+        # Input dimension should be rank 3 [batch_size, n_steps(max), n_features]
+        if len(inputs_shape) != 3:
+            raise Exception("BiRNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]")
+
+        with tf.name_scope(self.name) as scope:
+            self.fw_cell.build(tuple(inputs_shape))
+            self.bw_cell.build(tuple(inputs_shape))
+
+        if self._trainable_weights is None:
+            self._trainable_weights = list()
+        for var in self.fw_cell.trainable_variables:
+            self._trainable_weights.append(var)
+        for var in self.bw_cell.trainable_variables:
+            self._trainable_weights.append(var)
+
+    # @tf.function
+    def forward(self, inputs, fw_initial_state=None, bw_initial_state=None, **kwargs):
+        """
+        Parameters
+        ----------
+        inputs : input tensor
+            The input of a network
+        fw_initial_state : None or list of Tensor (RNN State)
+            If None, `fw_initial_state` is zero state.
+        bw_initial_state : None or list of Tensor (RNN State)
+            If None, `bw_initial_state` is zero state.
+        **kwargs : dict
+            Some attributes can be updated during forwarding,
+            such as `return_seq_2d` and `return_last_state`.
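+
+        Examples
+        --------
+        A minimal sketch (reusing `rnnlayer` and `inputs` from the class examples above;
+        the keyword simply updates the corresponding attribute before this forward pass):
+
+        >>> out, fw_state, bw_state = rnnlayer(inputs, return_last_state=True)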
+        """
+
+        if kwargs:
+            for attr in kwargs:
+                if attr in self.__dict__:
+                    setattr(self, attr, kwargs[attr])
+
+        fw_outputs = list()
+        bw_outputs = list()
+
+        fw_states = fw_initial_state if fw_initial_state is not None else self.fw_cell.get_initial_state(inputs)
+        bw_states = bw_initial_state if bw_initial_state is not None else self.bw_cell.get_initial_state(inputs)
+
+        if not isinstance(fw_states, list):
+            fw_states = [fw_states]
+        if not isinstance(bw_states, list):
+            bw_states = [bw_states]
+
+        total_steps = inputs.get_shape().as_list()[1]
+
+        self.fw_cell.reset_dropout_mask()
+        self.fw_cell.reset_recurrent_dropout_mask()
+        self.bw_cell.reset_dropout_mask()
+        self.bw_cell.reset_recurrent_dropout_mask()
+
+        # the backward cell consumes the sequence in reversed time order
+        for time_step in range(total_steps):
+            fw_cell_output, fw_states = self.fw_cell.call(inputs[:, time_step, :], fw_states, training=self.is_train)
+            bw_cell_output, bw_states = self.bw_cell.call(
+                inputs[:, -time_step - 1, :], bw_states, training=self.is_train
+            )
+
+            fw_outputs.append(fw_cell_output)
+            bw_outputs.append(bw_cell_output)
+
+        if self.return_seq_2d:
+            # PTB tutorial: stack dense layer after that, or compute the cost from the output
+            # 2D Tensor [batch_size * n_steps, n_hidden]
+            fw_outputs = tf.reshape(tf.concat(fw_outputs, 1), [-1, self.fw_cell.units])
+            bw_outputs = tf.reshape(tf.concat(bw_outputs, 1), [-1, self.bw_cell.units])
+        else:
+            # : stack more RNN layer after that
+            # 3D Tensor [batch_size, n_steps, n_hidden]
+            fw_outputs = tf.reshape(tf.concat(fw_outputs, 1), [-1, total_steps, self.fw_cell.units])
+            bw_outputs = tf.reshape(tf.concat(bw_outputs, 1), [-1, total_steps, self.bw_cell.units])
+
+        outputs = tf.concat([fw_outputs, bw_outputs], -1)
+
+        if self.return_last_state:
+            return outputs, fw_states, bw_states
+        else:
+            return outputs
+
+
+'''
+class ConvRNNCell(object):
+    """Abstract object representing a Convolutional RNN Cell."""
+
+    def __call__(self, inputs, state, scope=None):
+        """Run this RNN cell on inputs, starting from the given state."""
+        raise NotImplementedError("Abstract method")
+
+    @property
+    def state_size(self):
+        """size(s) of state(s) used by this cell."""
+        raise NotImplementedError("Abstract method")
+
+    @property
+    def output_size(self):
+        """Integer or TensorShape: size of outputs produced by this cell."""
+        raise NotImplementedError("Abstract method")
+
+    def zero_state(self, batch_size):  # , dtype=LayersConfig.tf_dtype):
+        """Return zero-filled state tensor(s).
+        Args:
+            batch_size: int, float, or unit Tensor representing the batch size.
+        Returns:
+            tensor of shape [batch_size x shape[0] x shape[1] x num_features]
+            filled with zeros
+
+        """
+        dtype = LayersConfig.tf_dtype
+        shape = self.shape
+        num_features = self.num_features
+        # TODO : TypeError: 'NoneType' object is not subscriptable
+        zeros = tf.zeros([batch_size, shape[0], shape[1], num_features * 2], dtype=dtype)
+        return zeros
+
+
+class BasicConvLSTMCell(ConvRNNCell):
+    """Basic Conv LSTM recurrent network cell.
+
+    Parameters
+    -----------
+    shape : tuple of int
+        The height and width of the cell.
+    filter_size : tuple of int
+        The height and width of the filter
+    num_features : int
+        The hidden size of the cell
+    forget_bias : float
+        The bias added to forget gates (see above).
+    input_size : int
+        Deprecated and unused.
+    state_is_tuple : boolean
+        If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`.
+        If False, they are concatenated along the column axis. The latter behavior will soon be deprecated.
+    act : activation function
+        The activation function of this layer, tanh as default.
+
+    """
+
+    def __init__(
+        self, shape, filter_size, num_features, forget_bias=1.0, input_size=None, state_is_tuple=False,
+        act=tf.nn.tanh
+    ):
+        """Initialize the basic Conv LSTM cell."""
+        # if not state_is_tuple:
+        #     logging.warn("%s: Using a concatenated state is slower and will soon be "
+        #                  "deprecated. Use state_is_tuple=True.", self)
+        if input_size is not None:
+            logging.warn("%s: The input_size parameter is deprecated.", self)
+        self.shape = shape
+        self.filter_size = filter_size
+        self.num_features = num_features
+        self._forget_bias = forget_bias
+        self._state_is_tuple = state_is_tuple
+        self._activation = act
+
+    @property
+    def state_size(self):
+        """State size of the LSTMStateTuple."""
+        return (LSTMStateTuple(self._num_units, self._num_units) if self._state_is_tuple else 2 * self._num_units)
+
+    @property
+    def output_size(self):
+        """Number of units in outputs."""
+        return self._num_units
+
+    def __call__(self, inputs, state, scope=None):
+        """Long short-term memory cell (LSTM)."""
+        with tf.compat.v1.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
+            # Parameters of gates are concatenated into one multiply for efficiency.
+            if self._state_is_tuple:
+                c, h = state
+            else:
+                # print state
+                # c, h = tf.split(3, 2, state)
+                c, h = tf.split(state, 2, 3)
+            concat = _conv_linear([inputs, h], self.filter_size, self.num_features * 4, True)
+
+            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
+            # i, j, f, o = tf.split(3, 4, concat)
+            i, j, f, o = tf.split(concat, 4, 3)
+
+            new_c = (c * tf.nn.sigmoid(f + self._forget_bias) + tf.nn.sigmoid(i) * self._activation(j))
+            new_h = self._activation(new_c) * tf.nn.sigmoid(o)
+
+            if self._state_is_tuple:
+                new_state = LSTMStateTuple(new_c, new_h)
+            else:
+                new_state = tf.concat([new_c, new_h], 3)
+            return new_h, new_state
+
+
+def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=None):
+    """convolution:
+
+    Parameters
+    ----------
+    args : tensor
+        4D Tensor or a list of 4D, batch x n, Tensors.
+    filter_size : tuple of int
+        Filter height and width.
+    num_features : int
+        Number of features.
+    bias_start : float
+        Starting value to initialize the bias; 0 by default.
+    scope : VariableScope
+        For the created subgraph; defaults to "Linear".
+
+    Returns
+    --------
+    - A 4D Tensor with shape [batch h w num_features]
+
+    Raises
+    -------
+    - ValueError : if some of the arguments have unspecified or wrong shape.
+
+    """
+    # Calculate the total size of arguments on dimension 1.
+    total_arg_size_depth = 0
+    shapes = [a.get_shape().as_list() for a in args]
+    for shape in shapes:
+        if len(shape) != 4:
+            raise ValueError("Linear is expecting 4D arguments: %s" % str(shapes))
+        if not shape[3]:
+            raise ValueError("Linear expects shape[3] of arguments: %s" % str(shapes))
+        else:
+            total_arg_size_depth += shape[3]
+
+    dtype = [a.dtype for a in args][0]
+
+    # Now the computation.
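+    # Concatenating all inputs along the channel axis and applying one convolution is
+    # equivalent to summing separate convolutions of each input (the kernel is simply
+    # split along its input-channel axis); the optional bias is added at the end.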
+    with tf.compat.v1.variable_scope(scope or "Conv"):
+        matrix = tf.compat.v1.get_variable(
+            "Matrix", [filter_size[0], filter_size[1], total_arg_size_depth, num_features], dtype=dtype
+        )
+        if len(args) == 1:
+            res = tf.nn.conv2d(args[0], matrix, strides=[1, 1, 1, 1], padding='SAME')
+        else:
+            res = tf.nn.conv2d(tf.concat(args, 3), matrix, strides=[1, 1, 1, 1], padding='SAME')
+        if not bias:
+            return res
+        bias_term = tf.compat.v1.get_variable(
+            "Bias", [num_features], dtype=dtype,
+            initializer=tf.compat.v1.initializers.constant(bias_start, dtype=dtype)
+        )
+        return res + bias_term
+
+
+class ConvLSTM(Module):
+    """A fixed-length Convolutional LSTM layer.
+
+    See this `paper `__ .
+
+    Parameters
+    ----------
+    prev_layer : :class:`Module`
+        Previous layer
+    cell_shape : tuple of int
+        The shape of each cell width * height
+    filter_size : tuple of int
+        The size of filter width * height
+    cell_fn : a convolutional RNN cell
+        Cell function like :class:`BasicConvLSTMCell`
+    feature_map : int
+        The number of feature maps in the layer.
+    initializer : initializer
+        The initializer for initializing the parameters.
+    n_steps : int
+        The sequence length.
+    initial_state : None or ConvLSTM State
+        If None, `initial_state` is zero state.
+    return_last : boolean
+        Whether to return the last output or all outputs in each step.
+
+        - If True, return the last output, "Sequence input and single output".
+        - If False, return all outputs, "Synced sequence input and output".
+        - In other words, if you want to stack more RNNs on this layer, set to False.
+
+    return_seq_2d : boolean
+        Only consider this argument when `return_last_output` is `False`
+
+        - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it.
+        - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it.
+
+    name : str
+        A unique layer name.
+
+    Attributes
+    ----------
+    outputs : tensor
+        The output of this RNN. return_last_output = False, outputs = all cell_output, which is the hidden state.
+        cell_output.get_shape() = (?, h, w, c)
+
+    final_state : tensor or StateTuple
+        The final state of this layer.
+
+        - When state_is_tuple = False, it is the final hidden and cell states,
+        - When state_is_tuple = True, you can get the final state after each iteration during training, then feed it to the initial state of the next iteration.
+
+    initial_state : tensor or StateTuple
+        It is the initial state of this ConvLSTM layer; you can use it to initialize
+        your state at the beginning of each epoch or iteration according to your
+        training procedure.
+
+    batch_size : int or tensor
+        An int if it is able to compute the batch_size; otherwise, a tensor for ``?``.
+
+    """
+
+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
+    def __init__(
+        self,
+        prev_layer,
+        cell_shape=None,
+        feature_map=1,
+        filter_size=(3, 3),
+        cell_fn=BasicConvLSTMCell,
+        initializer=tf.compat.v1.initializers.random_uniform(-0.1, 0.1),
+        n_steps=5,
+        initial_state=None,
+        return_last=False,
+        return_seq_2d=False,
+        name='convlstm',
+    ):
+        super(ConvLSTM, self).__init__(prev_layer=prev_layer, name=name)
+
+        logging.info(
+            "ConvLSTM %s: feature_map: %d, n_steps: %d, "
+            "in_dim: %d %s, cell_fn: %s " %
+            (self.name, feature_map, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__)
+        )
+        # You can get the dimension by .get_shape() or ._shape, and check the
+        # dimension by .with_rank() as follows.
+ # self.inputs.get_shape().with_rank(2) + # self.inputs.get_shape().with_rank(3) + + # Input dimension should be rank 5 [batch_size, n_steps(max), h, w, c] + try: + self.inputs.get_shape().with_rank(5) + except Exception: + raise Exception( + "RNN : Input dimension should be rank 5 : [batch_size, n_steps, input_x, " + "input_y, feature_map]" + ) + + fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] + + if fixed_batch_size.value: + batch_size = fixed_batch_size.value + logging.info(" RNN batch_size (concurrent processes): %d" % batch_size) + + else: + batch_size = array_ops.shape(self.inputs)[0] + logging.info(" non specified batch_size, uses a tensor instead.") + self.batch_size = batch_size + outputs = [] + self.cell = cell = cell_fn(shape=cell_shape, filter_size=filter_size, num_features=feature_map) + + if initial_state is None: + self.initial_state = cell.zero_state(batch_size, dtype=LayersConfig.tf_dtype) + else: + self.initial_state = initial_state + + state = self.initial_state + + # with tf.variable_scope("model", reuse=None, initializer=initializer): + with tf.compat.v1.variable_scope(name, initializer=initializer) as vs: + for time_step in range(n_steps): + if time_step > 0: tf.compat.v1.get_variable_scope().reuse_variables() + (cell_output, state) = cell(self.inputs[:, time_step, :, :, :], state) + outputs.append(cell_output) + + # Retrieve just the RNN variables. + # rnn_variables = [v for v in tf.all_variables() if v.name.startswith(vs.name)] + rnn_variables = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.VARIABLES, scope=vs.name) + + logging.info(" n_params : %d" % (len(rnn_variables))) + + if return_last: + # 2D Tensor [batch_size, n_hidden] + self.outputs = outputs[-1] + else: + if return_seq_2d: + # PTB tutorial: stack dense layer after that, or compute the cost from the output + # 4D Tensor [n_example, h, w, c] + self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, cell_shape[0] * cell_shape[1] * feature_map]) + else: + # : stack more RNN layer after that + # 5D Tensor [n_example/n_steps, n_steps, h, w, c] + self.outputs = tf.reshape( + tf.concat(outputs, 1), [-1, n_steps, cell_shape[0], cell_shape[1], feature_map] + ) + + self.final_state = state + + self._add_layers(self.outputs) + self._add_params(rnn_variables) + +''' + + +# @tf.function +def retrieve_seq_length_op(data): + """An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features], + it can be used when the features of padding (on right hand side) are all zeros. + + Parameters + ----------- + data : tensor + [batch_size, n_step(max), n_features] with zero padding on right hand side. + + Examples + ----------- + Single feature + + >>> data = [[[1],[2],[0],[0],[0]], + >>> [[1],[2],[3],[0],[0]], + >>> [[1],[2],[6],[1],[0]]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> length = tl.layers.retrieve_seq_length_op(data) + [2 3 4] + + Multiple features + + >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]], + >>> [[2,3],[2,4],[3,2],[0,0],[0,0]], + >>> [[3,3],[2,2],[5,3],[1,2],[0,0]]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> length = tl.layers.retrieve_seq_length_op(data) + [4 3 4] + + References + ------------ + Borrow from `TFlearn `__. 
+
+    """
+    with tf.name_scope('GetLength'):
+        used = tf.sign(tf.reduce_max(input_tensor=tf.abs(data), axis=2))
+        length = tf.reduce_sum(input_tensor=used, axis=1)
+
+    return tf.cast(length, tf.int32)
+
+
+# @tf.function
+def retrieve_seq_length_op2(data):
+    """An op to compute the length of a sequence from input shape of [batch_size, n_step(max)].
+    It can be used when the features of padding (on the right hand side) are all zeros.
+
+    Parameters
+    -----------
+    data : tensor
+        [batch_size, n_step(max)] with zero padding on the right hand side.
+
+    Examples
+    -----------
+    >>> data = [[1,2,0,0,0],
+    >>>         [1,2,3,0,0],
+    >>>         [1,2,6,1,0]]
+    >>> data = tf.convert_to_tensor(data, dtype=tf.float32)
+    >>> length = tl.layers.retrieve_seq_length_op2(data)
+    tensor([2, 3, 4])
+
+    """
+    return tf.reduce_sum(input_tensor=tf.cast(tf.greater(data, tf.zeros_like(data)), tf.int32), axis=1)
+
+
+# @tf.function
+def retrieve_seq_length_op3(data, pad_val=0):
+    """An op to compute the length of a sequence; the data shape can be [batch_size, n_step(max)] or
+    [batch_size, n_step(max), n_features].
+
+    If the data has type of tf.string and pad_val is assigned as empty string (''), this op will compute the
+    length of the string sequence.
+
+    Parameters
+    -----------
+    data : tensor
+        [batch_size, n_step(max)] or [batch_size, n_step(max), n_features] with zero padding on the right hand side.
+    pad_val :
+        By default 0. If the data is tf.string, please assign this as empty string ('')
+
+    Examples
+    -----------
+    >>> data = [[[1],[2],[0],[0],[0]],
+    >>>         [[1],[2],[3],[0],[0]],
+    >>>         [[1],[2],[6],[1],[0]]]
+    >>> data = tf.convert_to_tensor(data, dtype=tf.float32)
+    >>> length = tl.layers.retrieve_seq_length_op3(data)
+    tensor([2, 3, 4])
+    >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]],
+    >>>         [[2,3],[2,4],[3,2],[0,0],[0,0]],
+    >>>         [[3,3],[2,2],[5,3],[1,2],[0,0]]]
+    >>> data = tf.convert_to_tensor(data, dtype=tf.float32)
+    >>> length = tl.layers.retrieve_seq_length_op3(data)
+    tensor([4, 3, 4])
+    >>> data = [[1,2,0,0,0],
+    >>>         [1,2,3,0,0],
+    >>>         [1,2,6,1,0]]
+    >>> data = tf.convert_to_tensor(data, dtype=tf.float32)
+    >>> length = tl.layers.retrieve_seq_length_op3(data)
+    tensor([2, 3, 4])
+    >>> data = [['hello','world','','',''],
+    >>>         ['hello','world','tensorlayer','',''],
+    >>>         ['hello','world','tensorlayer','2.0','']]
+    >>> data = tf.convert_to_tensor(data, dtype=tf.string)
+    >>> length = tl.layers.retrieve_seq_length_op3(data, pad_val='')
+    tensor([2, 3, 4])
+
+    """
+    data_shape_size = data.get_shape().ndims
+    if data_shape_size == 3:
+        return tf.reduce_sum(
+            input_tensor=tf.cast(tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2), dtype=tf.int32),
+            axis=1
+        )
+    elif data_shape_size == 2:
+        return tf.reduce_sum(input_tensor=tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), axis=1)
+    elif data_shape_size == 1:
+        raise ValueError("retrieve_seq_length_op3: data has wrong shape! Shape got %s" % str(data.get_shape().as_list()))
+    else:
+        raise ValueError(
+            "retrieve_seq_length_op3: handling data with num of dims %s hasn't been implemented!" % (data_shape_size)
+        )
+
+
+def target_mask_op(data, pad_val=0):
+    """Return the mask of the input sequence data based on the padding values.
+
+    Parameters
+    -----------
+    data : tf.Tensor
+        A tensor with 2 or 3 dimensions.
+    pad_val : int, float, string, etc
+        The value that represents padding. By default, 0. For tf.string, you may use the empty string.
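+
+    Returns
+    -------
+    tf.Tensor
+        An `int32` mask spanning the batch and step dimensions of `data`, with 1 at
+        non-padding positions and 0 at padding positions.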
+ + Examples + ----------- + >>> data = [['hello', 'world', '', '', ''], + >>> ['hello', 'world', 'tensorlayer', '', ''], + >>> ['hello', 'world', 'tensorlayer', '2.0', '']] + >>> data = tf.convert_to_tensor(data, dtype=tf.string) + >>> mask = tl.layers.target_mask_op(data, pad_val='') + >>> print(mask) + tf.Tensor( + [[1 1 0 0 0] + [1 1 1 0 0] + [1 1 1 1 0]], shape=(3, 5), dtype=int32) + >>> data = [[[1], [0], [0], [0], [0]], + >>> [[1], [2], [3], [0], [0]], + >>> [[1], [2], [0], [1], [0]]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> mask = tl.layers.target_mask_op(data) + >>> print(mask) + tf.Tensor( + [[1 0 0 0 0] + [1 1 1 0 0] + [1 1 0 1 0]], shape=(3, 5), dtype=int32) + >>> data = [[[0,0],[2,2],[1,2],[1,2],[0,0]], + >>> [[2,3],[2,4],[3,2],[1,0],[0,0]], + >>> [[3,3],[0,1],[5,3],[1,2],[0,0]]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> mask = tl.layers.target_mask_op(data) + >>> print(mask) + tf.Tensor( + [[0 1 1 1 0] + [1 1 1 1 0] + [1 1 1 1 0]], shape=(3, 5), dtype=int32) + """ + + if not isinstance(data, tf.Tensor): + raise AttributeError("target_mask_op: the type of input data should be tf.Tensor but got %s." % type(data)) + data_shape_size = data.get_shape().ndims + if data_shape_size == 3: + return tf.cast(tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2), dtype=tf.int32) + elif data_shape_size == 2: + return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32) + elif data_shape_size == 1: + raise ValueError( + "target_mask_op: data_shape %s is not supported. " + "The shape of data should have 2 or 3 dims." % (data.get_shape()) + ) + else: + raise ValueError( + "target_mask_op: handling data_shape %s hasn't been implemented! " + "The shape of data should have 2 or 3 dims" % (data.get_shape()) + ) diff --git a/tensorlayer/layers/shape.py b/tensorlayer/layers/shape.py index 477847dc4..8ecdad0e6 100644 --- a/tensorlayer/layers/shape.py +++ b/tensorlayer/layers/shape.py @@ -202,11 +202,6 @@ def forward(self, inputs): if tl.BACKEND == 'tensorflow': in_shape = tl.get_tensor_shape(inputs) h, w, in_channel = in_shape[1:] - # if in_channel % self.group != 0: - # raise ValueError( - # "The in_channel must be a multiple of the number of groups. The in_channel got %d and the number of groups is %d." 
- # % (in_channel, self.group) - # ) reshape1 = tl.ops.Reshape([-1, h, w, in_channel // self.group, self.group]) temp = reshape1(inputs) temp = self.transpose(temp) diff --git a/tensorlayer/layers/stack.py b/tensorlayer/layers/stack.py index 570f805dc..6c84291d7 100644 --- a/tensorlayer/layers/stack.py +++ b/tensorlayer/layers/stack.py @@ -24,14 +24,13 @@ class Stack(Module): Examples --------- - >>> import tensorflow as tf >>> import tensorlayer as tl - >>> ni = tl.layers.Input([None, 784], name='input') + >>> ni = tl.layers.Input([10, 784], name='input') >>> net1 = tl.layers.Dense(10, name='dense1')(ni) >>> net2 = tl.layers.Dense(10, name='dense2')(ni) >>> net3 = tl.layers.Dense(10, name='dense3')(ni) >>> net = tl.layers.Stack(axis=1, name='stack')([net1, net2, net3]) - (?, 3, 10) + (10, 3, 10) """ @@ -82,9 +81,9 @@ class UnStack(Module): Examples -------- - >>> ni = Input([4, 10], name='input') - >>> nn = Dense(n_units=5)(ni) - >>> nn = UnStack(axis=1)(nn) # unstack in channel axis + >>> ni = tl.layers.Input([4, 10], name='input') + >>> nn = tl.layers.Dense(n_units=5)(ni) + >>> nn = tl.layers.UnStack(axis=1)(nn) # unstack in channel axis >>> len(nn) # 5 >>> nn[0].shape # (4,) diff --git a/tensorlayer/layers/utils.py b/tensorlayer/layers/utils.py index b231d1b0e..18888c25d 100644 --- a/tensorlayer/layers/utils.py +++ b/tensorlayer/layers/utils.py @@ -433,8 +433,10 @@ def mean_var_with_update(update_moving_mean, update_moving_variance, mean, varia with tf.control_dependencies([update_moving_mean, update_moving_variance]): return tf.identity(mean), tf.identity(variance) + def w_fold(w, gama, var, epsilon): return tf.compat.v1.div(tf.multiply(gama, w), tf.sqrt(var + epsilon)) + def bias_fold(beta, gama, mean, var, epsilon): - return tf.subtract(beta, tf.compat.v1.div(tf.multiply(gama, mean), tf.sqrt(var + epsilon))) \ No newline at end of file + return tf.subtract(beta, tf.compat.v1.div(tf.multiply(gama, mean), tf.sqrt(var + epsilon))) diff --git a/tensorlayer/models/__init__.py b/tensorlayer/models/__init__.py index 2ebf9f076..080f9418c 100644 --- a/tensorlayer/models/__init__.py +++ b/tensorlayer/models/__init__.py @@ -3,10 +3,10 @@ # """A collections of pre-defined well known models.""" -from .core import * # from .resnet import ResNet50 # from .mobilenetv1 import MobileNetV1 # from .squeezenetv1 import SqueezeNetV1 # from .vgg import * # from .seq2seq import Seq2seq # from .seq2seq_with_attention import Seq2seqLuongAttention +from .core import Model \ No newline at end of file diff --git a/tensorlayer/models/core.py b/tensorlayer/models/core.py index 00e5a1f24..42eb595b2 100644 --- a/tensorlayer/models/core.py +++ b/tensorlayer/models/core.py @@ -24,13 +24,28 @@ class Model: `Model` groups layers into an object with training and inference features. - Args: - network : The training or testing network. - loss_fn : Objective function, if loss_fn is None, the - network should contain the logic of loss and grads calculation, and the logic - of parallel if needed. Default: None. - optimizer : Optimizer for updating the weights. Default: None. - metrics : Dict or set of metrics to be evaluated by the model during + Parameters + ---------- + network : tensorlayer model + The training or testing network. + loss_fn : function + Objective function + optimizer : function + Optimizer for updating the weights + metrics : function + Dict or set of metrics to be evaluated by the model during + + Methods + --------- + trin() + Model training. + eval() + Model prediction. 
+    save_weights()
+        Given file_path, save the model weights into a file of the given format.
+        Use load_weights() to restore.
+    load_weights()
+        Load model weights from a given file, which should be previously saved by save_weights().
 
     Examples:
         >>> import tensorlayer as tl
@@ -50,11 +65,12 @@ class Model:
         >>> return out
         >>>
         >>> net = Net()
-        >>> loss = tl.cost.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
-        >>> optim = tl.layers.Momentum(params=net.trainable_weights, learning_rate=0.1, momentum=0.9)
+        >>> loss = tl.cost.cross_entropy
+        >>> optim = tl.optimizers.Momentum(params=net.trainable_weights, learning_rate=0.1, momentum=0.9)
         >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
        >>> dataset = get_dataset()
         >>> model.train(2, dataset)
+
     """
 
     def __init__(self, network, loss_fn=None, optimizer=None, metrics=None, **kwargs):
diff --git a/tensorlayer/models/models/resnet50_weights_tf_dim_ordering_tf_kernels.h5 b/tensorlayer/models/models/resnet50_weights_tf_dim_ordering_tf_kernels.h5
deleted file mode 100644
index 904349fea3fbc3f8267a7cc70c5b7d7f464781e5..0000000000000000000000000000000000000000
GIT binary patch
diff --git a/tensorlayer/package_info.py b/tensorlayer/package_info.py
index 1efbae64a..65cf30614 100644
--- a/tensorlayer/package_info.py
+++ b/tensorlayer/package_info.py
@@ -5,7 +5,7 @@
 MAJOR = 3
 MINOR = 0
 PATCH = 0
-PRE_RELEASE = ''
+PRE_RELEASE = 'alpha'

 # Use the following formatting: (major, minor, patch, prerelease)
 VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)

From 2a2d8656646063fc90b5ca00bae06d8a32b53e4e Mon Sep 17 00:00:00 2001
From: Eric Lai
Date: Mon, 21 Jun 2021 15:51:31 +0800
Subject: [PATCH 15/36] update dataflow

---
 tensorlayer/dataflow/__init__.py              |   21 -
 tensorlayer/dataflow/dataflow_examples.py     |   56 -
 tensorlayer/dataflow/image/__init__.py        |   17 -
 tensorlayer/dataflow/image/mindspore_image.py | 1539 -----------------
 tensorlayer/dataflow/image/paddle_image.py    |   19 -
 .../dataflow/image/tensorflow_image.py        |  760 --------
 tensorlayer/dataflow/mindspore_data.py        |  287 ---
 tensorlayer/dataflow/paddle_data.py           |  131 --
 tensorlayer/dataflow/tensorflow_data.py       |  266 ---
 9 files changed, 3096 deletions(-)
 delete mode 100644 tensorlayer/dataflow/__init__.py
 delete mode 100644 tensorlayer/dataflow/dataflow_examples.py
 delete mode 100644 tensorlayer/dataflow/image/__init__.py
 delete mode 100644 tensorlayer/dataflow/image/mindspore_image.py
 delete mode 100644 tensorlayer/dataflow/image/paddle_image.py
 delete mode 100644 tensorlayer/dataflow/image/tensorflow_image.py
 delete mode 100644 tensorlayer/dataflow/mindspore_data.py
 delete mode 100644 tensorlayer/dataflow/paddle_data.py
 delete mode 100644 tensorlayer/dataflow/tensorflow_data.py
diff --git a/tensorlayer/dataflow/__init__.py b/tensorlayer/dataflow/__init__.py
deleted file mode 100644
index 912a2384a..000000000
--- a/tensorlayer/dataflow/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-from __future__ import absolute_import, division, print_function
-
-from tensorlayer.backend.ops.load_backend import BACKEND
-from tensorlayer.dataflow import image
-
-if BACKEND == 'tensorflow':
-    from .tensorflow_data import *
-
-elif BACKEND == 'mindspore':
-    from .mindspore_data import *
-
-elif BACKEND == 'paddle':
-    from .paddle_data import *
-
-elif BACKEND == 'dragon':
-    pass
-
-else:
-    raise NotImplementedError("This backend is not supported")
diff --git a/tensorlayer/dataflow/dataflow_examples.py b/tensorlayer/dataflow/dataflow_examples.py
deleted file mode 100644
index 2bee24684..000000000
--- a/tensorlayer/dataflow/dataflow_examples.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-
-import tensorlayer as tl
-from tensorlayer.dataflow import Dataset
-import numpy as np
-
-X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
-
-
-def generator_train():
-    inputs = X_train
-    targets = y_train
-    if len(inputs) != len(targets):
-        raise AssertionError("The length of inputs and targets should be equal")
-    for _input, _target in zip(inputs, targets):
-        # yield _input.encode('utf-8'), _target.encode('utf-8')
-        yield (_input, np.array(_target))
-
-
-batch_size = 128
-shuffle_buffer_size = 128
-n_epoch = 10
-
-import tensorflow as tf
-
-
-def _map_fn_train(img, target):
-    # 1. Randomly crop a [height, width] section of the image.
-    img = tf.image.random_crop(img, [24, 24, 3])
-    # 2. Randomly flip the image horizontally.
-    img = tf.image.random_flip_left_right(img)
-    # 3. Randomly change brightness.
-    img = tf.image.random_brightness(img, max_delta=63)
-    # 4. Randomly change contrast.
-    img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
-    # 5. Subtract off the mean and divide by the variance of the pixels.
-    img = tf.image.per_image_standardization(img)
-    target = tf.reshape(target, ())
-    return img, target
-
-
-import multiprocessing
-train_ds = Dataset.from_generator(
-    generator=generator_train, output_types=(tl.float32, tl.int32)
-)  # , output_shapes=((24, 24, 3), (1)))
-
-train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
-
-train_ds = train_ds.repeat(n_epoch)
-train_ds = train_ds.shuffle(shuffle_buffer_size)
-train_ds = train_ds.prefetch(buffer_size=4096)
-train_ds = train_ds.batch(batch_size)
-
-for X_batch, y_batch in train_ds:
-    print(X_batch.shape, y_batch.shape)
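A side note on the example pipeline deleted above: tf.data pipelines conventionally batch before prefetching, so that prefetch overlaps whole-batch production with training. A reordered sketch of the same steps (an editorial suggestion, not part of the patch):

train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
train_ds = train_ds.shuffle(shuffle_buffer_size)
train_ds = train_ds.repeat(n_epoch)
train_ds = train_ds.batch(batch_size)
train_ds = train_ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)  # prefetch last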
diff --git a/tensorlayer/dataflow/image/__init__.py b/tensorlayer/dataflow/image/__init__.py
deleted file mode 100644
index c0568ed47..000000000
--- a/tensorlayer/dataflow/image/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-from __future__ import absolute_import, division, print_function
-
-from tensorlayer.backend.ops.load_backend import BACKEND
-
-if BACKEND == 'tensorflow':
-    from .tensorflow_image import *
-elif BACKEND == 'mindspore':
-    from .mindspore_image import *
-elif BACKEND == 'paddle':
-    from .paddle_image import *
-elif BACKEND == 'pytorch':
-    pass
-
-else:
-    raise NotImplementedError("This backend is not supported")
diff --git a/tensorlayer/dataflow/image/mindspore_image.py b/tensorlayer/dataflow/image/mindspore_image.py
deleted file mode 100644
index 9f10c7d1a..000000000
--- a/tensorlayer/dataflow/image/mindspore_image.py
+++ /dev/null
@@ -1,1539 +0,0 @@
-import numpy as np
-from PIL import Image, ImageOps, ImageEnhance, __version__
-import random
-import colorsys
-import numbers
-import math
-import io
-__all__ = [
-    'CentralCrop',
-    'HsvToRgb',
-    'AdjustBrightness',
-    'AdjustContrast',
-    'AdjustHue',
-    'AdjustSaturation',
-    'Crop',
-    'FlipHorizontal',
-    'FlipVertical',
-    'GrayToRgb',
-    'Standardization',
-    'RgbToGray',
-    'PadToBoundingbox',
-    'Pad',
-    'RandomBrightness',
-    'RandomContrast',
-    'RandomHue',
-    'RandomSaturation',
-    'RandomCrop',
-    'Resize',
-    'CropAndResize',
-    'CropOrPad',
-    'ResizeAndPad',
-    'RgbToHsv',
-    'Transpose',
-    'RandomRotation',
-    'RandomShift',
-    'RandomShear',
-    'RandomZoom',
-    'Rescale',
-    'RandomFlipVertical',
-    'RandomFlipHorizontal',
-    'HWC2CHW',
-    'CHW2HWC',
-]
-
-augment_error_message = 'img should be PIL image. Got {}.'
-
-
-def ToTensor(image):
-
-    image = np.asarray(image).astype(np.float32)
-    return image
-
-
-def ToPIL(image):
-    """
-    Convert the input image to PIL format.
-
-    Args:
-        img: Image to be converted.
-
-    Returns:
-        img (PIL image), Converted image.
-    """
-    return Image.fromarray(np.array(image).astype(np.uint8))
-
-
-def Decode(image):
-    """
-    Decode the input image to PIL image format in RGB mode.
-
-    Args:
-        img: Image to be decoded.
-
-    Returns:
-        img (PIL image), Decoded image in RGB mode.
-    """
-
-    try:
-        data = io.BytesIO(image)
-        img = Image.open(data)
-        return img.convert('RGB')
-    except IOError as e:
-        raise ValueError("{0}\nWARNING: Failed to decode given image.".format(e))
-    except AttributeError as e:
-        raise ValueError("{0}\nWARNING: Failed to decode, Image might already be decoded.".format(e))
-
-
-def Crop(image, offset_height, offset_width, target_height, target_width, is_hwc=True):
-    '''
-
-    Parameters
-    ----------
-    image:
-        An image or a batch of images
-    offset_height:
-        Vertical coordinate of the top-left corner of the result in the input.
-    offset_width:
-        Horizontal coordinate of the top-left corner of the result in the input.
-    target_height:
-        Height of the result.
-    target_width:
-        Width of the result.
-    is_hwc:
-        If is_hwc is True, the order of image channels is [B,H,W,C] or [H,W,C].
-        If is_hwc is False, the order of image channels is [B,C,H,W] or [C,H,W].
-    Returns:
-        Output [batch, target_height, target_width, channels] or [target_height, target_width, channels]
-    -------
-
-    '''
-    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
-        image = Decode(image)
-    if isinstance(image, Image.Image):
-        image = ToTensor(image)
-    if not isinstance(image, np.ndarray):
-        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
-    shape_size = len(image.shape)
-
-    if not shape_size in (3, 4):
-        raise TypeError(
-            'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). 
\ - Got {}'.format(image.shape) - ) - if shape_size == 3: - if is_hwc: - height, width, channels = image.shape - else: - channels, height, width = image.shape - else: - if is_hwc: - batch, height, width, channels = image.shape - else: - batch, channels, height, width = image.shape - - if offset_width < 0: - raise ValueError('offset_width must be >0.') - if offset_height < 0: - raise ValueError('offset_height must be >0.') - if target_height < 0: - raise ValueError('target_height must be >0.') - if target_width < 0: - raise ValueError('target_width must be >0.') - if offset_width + target_width > width: - raise ValueError('offset_width + target_width must be <= image width.') - if offset_height + target_height > height: - raise ValueError('offset_height + target_height must be <= image height.') - - if shape_size == 3: - if is_hwc: - return ToTensor( - image[offset_height:offset_height + target_height, offset_width:offset_width + target_width, :] - ) - else: - return ToTensor( - image[:, offset_height:offset_height + target_height, offset_width:offset_width + target_width] - ) - else: - if is_hwc: - return ToTensor( - image[:, offset_height:offset_height + target_height, offset_width:offset_width + target_width, :] - ) - else: - return ToTensor( - image[:, :, offset_height:offset_height + target_height, offset_width:offset_width + target_width] - ) - - -def CentralCrop(image, central_fraction=None, size=None, is_hwc=True): - ''' - - Parameters - ---------- - image : - input Either a 3-D float Tensor of shape [height, width, depth] or a 4-D Tensor of shape [batch, height, width, depth], - central_fraction : - float (0, 1], fraction of size to crop - size: - size (Union[int, sequence]) – The output size of the cropped image. If size is an integer, a square crop of size (size, size) is returned. - If size is a sequence of length 2, it should be (height, width). - Returns : - 3-D float Tensor or 4-D float Tensor, as per the input. - ------- - If backend is tensorflow, central_fraction will be used preferentially. if size is used, the height-width ratio will be equivalent to original ratio.. - If backend is mindspore, size will be used preferentially. - ''' - if size is None and central_fraction is None: - raise ValueError('central_fraction and size can not be both None') - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - shape_size = len(image.shape) - if not shape_size in (3, 4): - raise TypeError( - 'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). 
\ - Got {}'.format(image.shape) - ) - - if shape_size == 3: - if is_hwc: - height, width, channels = image.shape - else: - channels, height, width = image.shape - else: - if is_hwc: - batch, height, width, channels = image.shape - else: - batch, channels, height, width = image.shape - if size is None: - if central_fraction > 1 or central_fraction <= 0: - raise ValueError('central_fraction must be in (0,1].') - target_height = int(round(height * central_fraction)) - target_width = int(round(width * central_fraction)) - size = (target_height, target_width) - if isinstance(size, int): - size = (size, size) - crop_height, crop_width = size - crop_top = int(round((height - crop_height) / 2.)) - crop_left = int(round((width - crop_width) / 2.)) - - return Crop(image, crop_top, crop_left, crop_height, crop_width, is_hwc) - - -def hsv_to_rgb(np_hsv_img, is_hwc): - """ - Convert HSV img to RGB img. - - Args: - np_hsv_img (numpy.ndarray): NumPy HSV image array of shape (H, W, C) or (C, H, W) to be converted. - is_hwc (Bool): If True, the shape of np_hsv_img is (H, W, C), otherwise must be (C, H, W). - - Returns: - np_rgb_img (numpy.ndarray), NumPy HSV image with same shape of np_hsv_img. - """ - if is_hwc: - h, s, v = np_hsv_img[:, :, 0], np_hsv_img[:, :, 1], np_hsv_img[:, :, 2] - else: - h, s, v = np_hsv_img[0, :, :], np_hsv_img[1, :, :], np_hsv_img[2, :, :] - to_rgb = np.vectorize(colorsys.hsv_to_rgb) - r, g, b = to_rgb(h, s, v) - - if is_hwc: - axis = 2 - else: - axis = 0 - np_rgb_img = np.stack((r, g, b), axis=axis) - return np_rgb_img - - -def HsvToRgb(image, is_hwc=True): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - shape_size = len(image.shape) - - if not shape_size in (3, 4): - raise TypeError( - 'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \ - Got {}'.format(image.shape) - ) - if shape_size == 3: - batch_size = 0 - if is_hwc: - num_channels = image.shape[2] - else: - num_channels = image.shape[0] - else: - batch_size = image.shape[0] - if is_hwc: - num_channels = image.shape[3] - else: - num_channels = image.shape[1] - - if num_channels != 3: - raise TypeError('img should be 3 channels RGB img. Got {} channels'.format(num_channels)) - if batch_size == 0: - return hsv_to_rgb(image, is_hwc) - return ToTensor([hsv_to_rgb(img, is_hwc) for img in image]) - - -def AdjustBrightness(image, factor): - ''' - - Parameters - ---------- - image: - input NumPy image array or PIL image - factor: - factor should be in the range (-1,1) - Returns: - ------- - np darray image - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. 
Got {}'.format(type(image))) - if factor >= 1 or factor <= -1: - raise ValueError('factor must be in (-1,1).') - image = image + factor * 255 - image = np.clip(image, 0, 255) - - return ToTensor(image) - - -def AdjustContrast(image, factor): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - image = ImageEnhance.Contrast(image).enhance(factor) - - return ToTensor(image) - - -def AdjustHue(image, factor): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - image_hue_factor = factor - if not -1 <= image_hue_factor <= 1: - raise ValueError('image_hue_factor {} is not in [-1, 1].'.format(image_hue_factor)) - - mode = image.mode - if mode in {'L', '1', 'I', 'F'}: - return image - - hue, saturation, value = image.convert('HSV').split() - - np_hue = np.array(hue, dtype=np.uint8) - - with np.errstate(over='ignore'): - np_hue += np.uint8(image_hue_factor * 255) - hue = Image.fromarray(np_hue, 'L') - - image = Image.merge('HSV', (hue, saturation, value)).convert(mode) - - return ToTensor(image) - - -def AdjustSaturation(image, factor): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - enhancer = ImageEnhance.Color(image) - image = enhancer.enhance(factor) - - return ToTensor(image) - - -def FlipHorizontal(image): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - - image = np.fliplr(image) - - return image - - -def FlipVertical(image): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - image = np.flipud(image) - - return image - - -def GrayToRgb(image): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. 
Got {}'.format(type(image)))
-    shape = image.shape
-    output_image = np.zeros((shape[0], shape[1], 3), dtype=np.uint8)
-    if len(shape) == 3:
-        for i in range(3):
-            output_image[:, :, i] = image[:, :, 0]
-    elif len(shape) == 2:
-        for i in range(3):
-            output_image[:, :, i] = image
-
-    return ToTensor(output_image)
-
-
-def RgbToGray(image):
-
-    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
-        image = Decode(image)
-    if isinstance(image, np.ndarray):
-        image = ToPIL(image)
-    if not isinstance(image, Image.Image):
-        raise TypeError(augment_error_message.format(type(image)))
-    '''
-    When converting a colour image to grayscale (mode 'L'), the library uses the ITU-R 601-2 luma transform:
-    L = R * 299/1000 + G * 587/1000 + B * 114/1000
-    '''
-    image = image.convert('L')
-    return ToTensor(image)
-
-
-def PadToBoundingbox(image, offset_height, offset_width, target_height, target_width, padding_value=0, is_hwc=True):
-    '''
-
-    Parameters
-    ----------
-    image:
-        A 3-D numpy ndarray or 4-D numpy ndarray image
-    offset_height:
-        Number of rows of zeros to add on top.
-    offset_width:
-        Number of columns of zeros to add on the left.
-    target_height:
-        Height of output image.
-    target_width:
-        Width of output image.
-    Returns
-        A numpy ndarray image
-    -------
-    '''
-
-    if offset_height < 0:
-        raise ValueError("offset_height must be >= 0")
-    if offset_width < 0:
-        raise ValueError("offset_width must be >= 0")
-    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
-        image = Decode(image)
-    if isinstance(image, Image.Image):
-        image = ToTensor(image)
-    if not isinstance(image, np.ndarray):
-        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
-    shape_size = len(image.shape)
-    if not shape_size in (3, 4):
-        raise TypeError(
-            'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \
-            Got {}'.format(image.shape)
-        )
-    if shape_size == 3:
-        if is_hwc:
-            height, width, channels = image.shape
-        else:
-            channels, height, width = image.shape
-    else:
-        if is_hwc:
-            batch, height, width, channels = image.shape
-        else:
-            batch, channels, height, width = image.shape
-    top = offset_height
-    bottom = target_height - height - top
-    left = offset_width
-    right = target_width - width - left
-
-    if bottom < 0:
-        raise ValueError("target_height must be >= offset_height + height")
-
-    if right < 0:
-        raise ValueError("target_width must be >= offset_width + width")
-
-    if shape_size == 3:
-        if is_hwc:
-            return ToTensor(
-                np.pad(
-                    image, ((top, bottom), (left, right), (0, 0)), mode='constant',
-                    constant_values=(padding_value, padding_value)
-                )
-            )
-        else:
-            return ToTensor(
-                np.pad(
-                    image, ((0, 0), (top, bottom), (left, right)), mode='constant',
-                    constant_values=(padding_value, padding_value)
-                )
-            )
-    else:
-        if is_hwc:
-            return ToTensor(
-                np.pad(
-                    image, ((0, 0), (top, bottom), (left, right), (0, 0)), mode='constant',
-                    constant_values=(padding_value, padding_value)
-                )
-            )
-        else:
-            return ToTensor(
-                np.pad(
-                    image, ((0, 0), (0, 0), (top, bottom), (left, right)), mode='constant',
-                    constant_values=(padding_value, padding_value)
-                )
-            )
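A quick shape check of the PadToBoundingbox helper above (a small sketch, assuming a NumPy HWC input):

import numpy as np

img = np.ones((4, 6, 3), dtype=np.float32)  # H=4, W=6, C=3
out = PadToBoundingbox(img, offset_height=2, offset_width=1, target_height=10, target_width=10)
print(out.shape)  # (10, 10, 3): 2 rows of padding above, 4 below; 1 column left, 3 right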
-
-
-def Pad(image, padding, padding_value=0, mode='constant', is_hwc=True):
-    '''
-
-    Parameters
-    ----------
-    image:
-        A 3-D or 4-D Tensor.
-    padding:
-        An integer or a list/tuple. If a single number is provided, pad all borders with this value.
-        If a tuple or list of 2 values is provided, pad the top and bottom with the first value and the left and right with the second value.
-        If 4 values are provided as a list or tuple, pad the top, bottom, left and right respectively.
-    padding_value:
-        In 'constant' mode, the scalar pad value to use. Must be same type as tensor.
-    mode:
-        One of 'constant', 'edge', 'reflect' or 'symmetric'.
-    Returns:
-        A padded Tensor. Has the same type as tensor.
-    -------
-
-    '''
-    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
-        image = Decode(image)
-    if isinstance(image, Image.Image):
-        image = ToTensor(image)
-    if not isinstance(image, np.ndarray):
-        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
-    shape_size = image.shape
-    if len(shape_size) == 3:
-        batch_size = 0
-    elif len(shape_size) == 4:
-        batch_size = shape_size[0]
-    else:
-        raise TypeError(
-            'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \
-            Got {}'.format(image.shape)
-        )
-    if mode not in ('constant', 'edge', 'reflect', 'symmetric'):
-        raise TypeError('mode should be one of (constant,edge,reflect,symmetric).')
-
-    if isinstance(padding, int):
-        padding = ((padding, padding), (padding, padding))
-    elif isinstance(padding, list) or isinstance(padding, tuple):
-        if len(padding) == 2:
-            padding = ((padding[0], padding[0]), (padding[1], padding[1]))
-        elif len(padding) == 4:
-            padding = ((padding[0], padding[1]), (padding[2], padding[3]))
-        else:
-            raise ValueError('The length of padding should be 2 or 4, but got {}.'.format(len(padding)))
-    else:
-        raise TypeError('Padding should be an integer or a list/tuple, but got {}.'.format(type(padding)))
-
-    if batch_size == 0:
-        if is_hwc:
-            padding = (padding[0], padding[1], (0, 0))
-        else:
-            padding = (
-                (0, 0),
-                padding[0],
-                padding[1],
-            )
-    else:
-        if is_hwc:
-            padding = ((0, 0), padding[0], padding[1], (0, 0))
-        else:
-            padding = ((0, 0), (0, 0), padding[0], padding[1])
-    if mode == 'constant':
-        return ToTensor(np.pad(image, padding, mode=mode, constant_values=(padding_value, padding_value)))
-    else:
-        return ToTensor(np.pad(image, padding, mode=mode))
-
-
-def Standardization(image, mean=None, std=None, channel_mode=False, is_hwc=True):
-    '''
-
-    Parameters
-    ----------
-    image:
-        An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image.
-    mean:
-        List or tuple of mean values for each channel, with respect to channel order.
-    std:
-        List or tuple of standard deviations for each channel.
-    channel_mode:
-        Decide to implement standardization on whole image or each channel of image.
-    Returns:
-        A Tensor with the same shape and dtype as image.
-    -------
-    '''
-
-    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
-        image = Decode(image)
-    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
-        image = ToTensor(image)
-    if not isinstance(image, np.ndarray):
-        raise TypeError('img should be NumPy image. 
Got {}'.format(type(image))) - num_shape = image.shape - if is_hwc: - height, width, channels = 0, 1, 2 - else: - channels, height, width = 0, 1, 2 - if mean is not None and std is not None: - if len(mean) != len(std): - raise ValueError("Length of mean and std must be equal") - if len(mean) == 1: - mean = [mean[0]] * num_shape[channels] - std = [std[0]] * num_shape[channels] - mean = np.array(mean, dtype=image.dtype) - std = np.array(std, dtype=image.dtype) - return ToTensor((image - mean[:, None, None]) / std[:, None, None]) - elif mean is None and std is None: - if channel_mode: - num_pixels = num_shape[height] * num_shape[width] - image_mean = np.mean(image, axis=(height, width)) - stddev = np.std(image, axis=(height, width)) - min_sttdev = 1 / np.sqrt(num_pixels) - min_sttdev = [min_sttdev] * num_shape[channels] - adjusted_sttdev = np.maximum(stddev, min_sttdev) - image -= image_mean - image = np.divide(image, adjusted_sttdev) - return ToTensor(image) - else: - num_pixels = num_shape[height] * num_shape[width] * num_shape[channels] - image_mean = np.mean(image, axis=(0, 1, 2)) - image_mean = [image_mean] * 3 - stddev = np.std(image, axis=(0, 1, 2)) - min_sttdev = 1 / np.sqrt(num_pixels) - adjusted_sttdev = np.maximum(stddev, min_sttdev) - adjusted_sttdev = [adjusted_sttdev] * 3 - image -= image_mean - image = np.divide(image, adjusted_sttdev) - return ToTensor(image) - else: - raise ValueError('std and mean must both be None or not None') - - -def RandomBrightness(image, factor): - ''' - - Parameters - ---------- - image: - An image or images to adjust - factor: - Float, must be non-negative. Factor must be (0,1). Random range will be [-factor, factor). - Returns: - The brightness-adjusted image(s). - ------- - - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - if factor < 0 or factor > 1: - raise ValueError('factor should be in [0,1].') - delta = random.uniform(-factor, factor) - image = image + delta * 255 - image = np.clip(image, 0, 255) - - return image - - -def RandomContrast(image, lower, upper, seed=None): - ''' - - Parameters - ---------- - image: - An image tensor with 3 or more dimensions. - lower: - float. Lower bound for the random contrast factor. - upper: - float. Upper bound for the random contrast factor. - seed: - A Python integer. Used to create a random seed. - - Returns: - The contrast-adjusted image(s). - ------- - ''' - if upper <= lower: - raise ValueError('upper must be > lower') - if lower < 0: - raise ValueError('lower must be non-negative') - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - factor = random.uniform(lower, upper) - image = ImageEnhance.Contrast(image).enhance(factor) - - return ToTensor(image) - - -def RandomHue(image, factor, seed=None): - ''' - - Parameters - ---------- - image: - RGB image or images. The size of the last dimension must be 3. - factor: - float. The maximum value for the random factor. - seed: - An operation-specific seed. I - - Returns: - Adjusted numpy ndarrry image(s). 
- ------- - - ''' - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - if factor > 0.5 or factor < 0: - raise ValueError('factor should be in [0,0.5].') - - image_hue_factor = random.uniform(-factor, factor) - mode = image.mode - if mode in {'L', '1', 'I', 'F'}: - return image - - hue, saturation, value = image.convert('HSV').split() - - np_hue = np.array(hue, dtype=np.uint8) - - with np.errstate(over='ignore'): - np_hue += np.uint8(image_hue_factor * 255) - hue = Image.fromarray(np_hue, 'L') - - image = Image.merge('HSV', (hue, saturation, value)).convert(mode) - - return ToTensor(image) - - -def RandomSaturation(image, lower, upper, seed=None): - ''' - Parameters - ---------- - image: - RGB image or images. The size of the last dimension must be 3. - lower: - float. Lower bound for the random saturation factor. - upper: - float. Upper bound for the random saturation factor. - seed: - An operation-specific seed. - - Returns; - Adjusted numpy ndarray image(s). - ------- - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - if upper <= lower: - raise ValueError('upper must be > lower.') - - if lower < 0: - raise ValueError('lower must be non-negative.') - factor = random.uniform(lower, upper) - enhancer = ImageEnhance.Color(image) - image = enhancer.enhance(factor) - - return ToTensor(image) - - -def RandomCrop(image, size, is_hwc=True): - ''' - - Parameters - ---------- - image: - Input an image to crop. - size: - if size is an integer, shape of cropped image will be [size, size, 3]. if length of size is 2. - shape of cropped image will be [height, width, 3]. - Returns: - A cropped image of the same rank as image and shape size. - ------- - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. 
Got {}'.format(type(image))) - if isinstance(size, int): - size = (size, size) - elif isinstance(size, (tuple, list)) and len(size) == 2: - size = size - else: - raise ValueError("Size should be a single integer or a list/tuple (h, w) of length 2.") - - def _input_to_factor_(image, size, is_hwc): - if len(image.shape) == 3: - if is_hwc: - height, width, channels = image.shape - else: - channels, height, width = image.shape - else: - if is_hwc: - batch, height, width, channels = image.shape - else: - batch, channels, height, width = image.shape - - target_height, target_width = size - if target_height > height or target_width > width: - raise ValueError("Crop size {} is larger than input image size {}".format(size, (height, width))) - if target_height == height and target_width == width: - return 0, 0, height, width - - top = random.randint(0, height - target_height) - left = random.randint(0, width - target_width) - return top, left, target_height, target_width - - top, left, height, width = _input_to_factor_(image, size, is_hwc) - - return Crop(image, top, left, height, width, is_hwc) - - -def Resize(image, size, method='bilinear', preserve_aspect_ratio=False, antialias=False): - ''' - - Parameters - ---------- - images: - Input an image to resize - size: - if size is an integer, shape of resized image will be [size, size, 3]. if length of size is 2. - shape of resized image will be [height, width, 3]. - method: - An image.ResizeMethod, or string equivalent. Defaults to bilinear. - preserve_aspect_ratio: - Whether to preserve the aspect ratio. - antialias: - Whether to use an anti-aliasing filter when downsampling an image. - Returns: - an resized image - ------- - ''' - DE_PY_INTER_MODE = { - 'nearest': Image.NEAREST, - 'bilinear': Image.BILINEAR, - 'cubic': Image.CUBIC, - 'lanczos': Image.LANCZOS, - 'bicubic': Image.BICUBIC - } - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - if isinstance(size, int): - size = (size, size) - elif isinstance(size, (tuple, list)) or len(size) == 2: - target_height, target_width = size - size = (target_width, target_height) - else: - raise ValueError("Size should be a single integer or a list/tuple (h, w) of length 2.") - if method not in ('nearest', 'bilinear', 'cubic', 'lanczos', 'bicubic'): - raise TypeError('Unknown resize method! resize method must be in (nearest bilinear cubic lanczos bicubic)') - - if preserve_aspect_ratio: - width, height = image.size - target_width, target_height = size - scale_factor_height = float(target_height / height) - scale_factor_width = float(target_width / width) - scale_factor = np.minimum(scale_factor_height, scale_factor_width) - new_target_height = int(scale_factor * height) - new_target_width = int(scale_factor * width) - size = (new_target_width, new_target_height) - interpolation = DE_PY_INTER_MODE[method] - image = image.resize(size, interpolation) - if antialias: - image = image.resize(size, Image.ANTIALIAS) - - return ToTensor(image) - - -def CropAndResize(image, boxes, box_indices, crop_size, method='bilinear', extrapolation_value=0, is_hwc=True): - ''' - - Parameters - ---------- - image: - A 4-D tensor of shape [batch, image_height, image_width, depth]. Both image_height and image_width need to be positive. - boxes: - A 2-D tensor of shape [num_boxes, 4]. 
-    box_indices:
-        A 1-D tensor of shape [num_boxes] with int32 values in [0, batch).
-        The value of box_ind[i] specifies the image that the i-th box refers to.
-    crop_size:
-        A 1-D tensor of 2 elements, size = [crop_height, crop_width]. All cropped image patches are resized to this size.
-        The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be positive.
-    method:
-        An optional string specifying the sampling method for resizing.
-        It can be either "bilinear" or "nearest" and defaults to "bilinear".
-    extrapolation_value:
-        An optional float. Defaults to 0. Value used for extrapolation, when applicable.
-    Returns:
-        A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth].
-    -------
-
-    '''
-    if method not in ["bilinear", "nearest"]:
-        raise ValueError('method must be bilinear or nearest.')
-    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
-        image = Decode(image)
-    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
-        image = ToTensor(image)
-    if not isinstance(image, np.ndarray):
-        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
-    boxes = np.asarray(boxes)
-    box_indices = np.asarray(box_indices)
-    image_shape = image.shape
-    if len(image_shape) == 4:
-        batch_size = image_shape[0]
-    elif len(image_shape) == 3:
-        image = np.expand_dims(image, axis=0)
-    else:
-        raise ValueError('Input must be a 3-D or 4-D image Tensor.')
-
-    box_num = boxes.shape[0]  # boxes.shape is [n,4]. n is the number of boxes.
-    if not is_hwc:  # check the channel order; convert to HWC or BHWC to simplify the computation below
-        image = np.transpose(image, (0, 2, 3, 1))
-    batch_size, height, width, channels = image.shape
-    return_image = np.zeros((box_num, crop_size[0], crop_size[1], 3))
-    for i in range(box_num):
-        y1, x1, y2, x2 = boxes[i]  # first check whether the image must be flipped: if y1 > y2 flip vertically, if x1 > x2 flip horizontally
-        cur_image = image[box_indices[i]]
-        if y1 > y2:
-            cur_image = FlipVertical(cur_image)
-            y1, y2 = y2, y1
-        if x1 > x2:
-            cur_image = FlipHorizontal(cur_image)
-            x1, x2 = x2, x1
-        top_padding = 0 if y1 > 0 else int(round(height * (-y1)))
-        left_padding = 0 if x1 > 0 else int(round(width * (-x1)))
-        bottom_padding = 0 if y2 < 1 else int(round(height * (y2 - 1)))
-        right_padding = 0 if x2 < 1 else int(round(width * (x2 - 1)))
-        # check whether padding is needed
-        target_height = top_padding + height + bottom_padding
-        target_width = left_padding + width + right_padding
-        if target_height != height or target_width != width:
-            cur_image = PadToBoundingbox(
-                cur_image, offset_height=top_padding, offset_width=left_padding, target_height=target_height,
-                target_width=target_width, padding_value=extrapolation_value, is_hwc=is_hwc
-            )
-        offset_height = 0 if y1 < 0 else int(round(height * y1))
-        offset_width = 0 if x1 < 0 else int(round(width * x1))
-        target_height = int(round(height * (y2 - y1)))
-        target_width = int(round(width * (x2 - x1)))
-        crop_image = Crop(cur_image, offset_height, offset_width, target_height, target_width, is_hwc)
-        resized_image = Resize(crop_image, crop_size, method=method)
-        return_image[i] = resized_image
-    if not is_hwc:
-        return_image = np.transpose(return_image, (0, 3, 1, 2))
-    return ToTensor(return_image)
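For reference, a short usage sketch of the CropAndResize defined above; boxes hold normalized (y1, x1, y2, x2) coordinates and box_indices selects the source image in the batch:

import numpy as np

imgs = np.random.uniform(0, 255, (2, 100, 100, 3))  # batch of two HWC images
boxes = np.array([[0.25, 0.25, 0.75, 0.75]])        # central half of the selected image
out = CropAndResize(imgs, boxes, box_indices=[0], crop_size=(24, 24))
print(out.shape)  # (1, 24, 24, 3)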
-
-
-def CropOrPad(image, target_height, target_width, is_hwc=True):
-    '''
-    Resizes an image to a target width and height by either centrally cropping the image or padding it evenly with zeros.
-    Parameters
-    ----------
-    image:
-        3-D Tensor of shape [height, width, channels].
-    target_height:
-        Target height.
-    target_width:
-        Target width.
-    Returns:
-        Cropped and/or padded image.
-    -------
-    '''
-
-    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
-        image = Decode(image)
-    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
-        image = ToTensor(image)
-    if not isinstance(image, np.ndarray):
-        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
-    shape_size = len(image.shape)
-    if not shape_size in (3, 4):
-        raise TypeError(
-            'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \
-            Got {}'.format(image.shape)
-        )
-    if target_height < 0:
-        raise ValueError('target_height must be >= 0.')
-    if target_width < 0:
-        raise ValueError('target_width must be >= 0.')
-    if shape_size == 3:
-        if is_hwc:
-            height, width, channels = image.shape
-        else:
-            channels, height, width = image.shape
-    else:
-        if is_hwc:
-            batch, height, width, channels = image.shape
-        else:
-            batch, channels, height, width = image.shape
-    offset_height = height - target_height
-    offset_width = width - target_width
-    offset_crop_height = max(offset_height // 2, 0)
-    offset_crop_width = max(offset_width // 2, 0)
-    offset_pad_height = max(-offset_height // 2, 0)
-    offset_pad_width = max(-offset_width // 2, 0)
-    cropped = Crop(
-        image, offset_crop_height, offset_crop_width, min(height, target_height), min(width, target_width), is_hwc
-    )
-
-    padded = PadToBoundingbox(cropped, offset_pad_height, offset_pad_width, target_height, target_width, is_hwc=is_hwc)
-
-    return ToTensor(padded)
-
-
-def ResizeAndPad(image, target_height, target_width, method='bilinear', antialias=False, is_hwc=True):
-    '''
-
-    Parameters
-    ----------
-    image:
-        4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels].
-    target_height:
-        Target height.
-    target_width:
-        Target width.
-    is_hwc:
-        The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True).
-    Returns:
-        Resized and padded image. If the input was 4-D, a 4-D float Tensor of shape [batch, new_height, new_width, channels].
-        If the input was 3-D, a 3-D float Tensor of shape [new_height, new_width, channels].
-    -------
-
-    '''
-    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
-        image = Decode(image)
-    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
-        image = ToTensor(image)
-    if not isinstance(image, np.ndarray):
-        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
-    shape_size = len(image.shape)
-    if not shape_size in (3, 4):
-        raise TypeError(
-            'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \
-            Got {}'.format(image.shape)
-        )
-    if target_height < 0:
-        raise ValueError('target_height must be >= 0.')
-    if target_width < 0:
-        raise ValueError('target_width must be >= 0.')
-    if shape_size == 3:
-        if is_hwc:
-            height, width, channels = image.shape
-        else:
-            channels, height, width = image.shape
-    else:
-        if is_hwc:
-            batch, height, width, channels = image.shape
-        else:
-            batch, channels, height, width = image.shape
-    height = float(height)
-    width = float(width)
-    ratio = max(height / target_height, width / target_width)
-    resized_height = int(round(height / ratio))
-    resized_width = int(round(width / ratio))
-    padding_height = max(0, int(round((target_height - resized_height) / 2)))
-    padding_width = max(0, int(round((target_width - resized_width) / 2)))
-    resized = Resize(
-        image, size=(resized_height, resized_width), method=method, antialias=antialias
-    )  # TODO: resizing a whole batch of images here still needs to be supported
-    padded = PadToBoundingbox(resized, padding_height, padding_width, target_height, target_width, is_hwc=is_hwc)
-    return ToTensor(padded)
-
-
-def rgb_to_hsv(np_rgb_img, is_hwc):
-    """
-    Convert RGB img to HSV img.
-
-    Args:
-        np_rgb_img (numpy.ndarray): NumPy RGB image array of shape (H, W, C) or (C, H, W) to be converted.
-        is_hwc (Bool): If True, the shape of np_hsv_img is (H, W, C), otherwise must be (C, H, W).
-
-    Returns:
-        np_hsv_img (numpy.ndarray), NumPy HSV image with same type of np_rgb_img.
-    """
-    if is_hwc:
-        r, g, b = np_rgb_img[:, :, 0], np_rgb_img[:, :, 1], np_rgb_img[:, :, 2]
-    else:
-        r, g, b = np_rgb_img[0, :, :], np_rgb_img[1, :, :], np_rgb_img[2, :, :]
-    to_hsv = np.vectorize(colorsys.rgb_to_hsv)
-    h, s, v = to_hsv(r, g, b)
-    if is_hwc:
-        axis = 2
-    else:
-        axis = 0
-    np_hsv_img = np.stack((h, s, v), axis=axis)
-    return np_hsv_img
-
-
-def RgbToHsv(image, is_hwc=True):
-
-    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
-        image = Decode(image)
-    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
-        image = ToTensor(image)
-    if not isinstance(image, np.ndarray):
-        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
-
-    shape_size = len(image.shape)
-
-    if not shape_size in (3, 4):
-        raise TypeError(
-            'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \
-            Got {}'.format(image.shape)
-        )
-
-    if shape_size == 3:
-        batch_size = 0
-        if is_hwc:
-            num_channels = image.shape[2]
-        else:
-            num_channels = image.shape[0]
-    else:
-        batch_size = image.shape[0]
-        if is_hwc:
-            num_channels = image.shape[3]
-        else:
-            num_channels = image.shape[1]
-
-    if num_channels != 3:
-        raise TypeError('img should be 3 channels RGB img. Got {} channels'.format(num_channels))
-    if batch_size == 0:
-        return ToTensor(rgb_to_hsv(image, is_hwc))
-    return ToTensor([rgb_to_hsv(img, is_hwc) for img in image])
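The RGB/HSV helpers above are inverses of each other, so a round trip should be lossless for in-range values (a small sketch; colorsys maps RGB in [0, 1] to H, S, V in [0, 1]):

import numpy as np

rgb = np.array([[[1.0, 0.0, 0.0]]])  # a single red pixel, HWC
hsv = RgbToHsv(rgb)                  # -> hue 0.0, saturation 1.0, value 1.0
back = HsvToRgb(hsv)
print(np.allclose(rgb, back))        # True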
-
-
-def Transpose(image, order):
-    """
-    Transpose the input image with order
-    """
-    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
-        image = Decode(image)
-    if isinstance(image, Image.Image) or isinstance(image, np.ndarray):
-        image = ToTensor(image)
-    if not isinstance(image, np.ndarray):
-        raise TypeError('img should be NumPy image. Got {}'.format(type(image)))
-    if len(image.shape) == 3:
-        if len(order) != 3:
-            raise ValueError('if image is 3-D tensor, order should be a list/tuple with length of 3')
-        return ToTensor(np.transpose(image, order))
-    elif len(image.shape) == 4:
-        if len(order) != 4:
-            raise ValueError('if image is 4-D tensor, order should be a list/tuple with length of 4')
-        return ToTensor(np.transpose(image, order))
-    else:
-        raise ValueError('\'image\' must have either 3 or 4 dimensions.')
-
-
-def RandomRotation(
-    image, degrees, fill_mode='nearest', fill_value=0, center=None, expand=False, is_hwc=True, interpolation_order=1
-):
-    if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image):
-        image = Decode(image)
-    if isinstance(image, np.ndarray):
-        image = ToPIL(image)
-    if not isinstance(image, Image.Image):
-        raise TypeError(augment_error_message.format(type(image)))
-
-    if isinstance(degrees, numbers.Number):
-        if degrees < 0:
-            raise ValueError("If degrees is a single number, it cannot be negative.")
-        degrees = (-degrees, degrees)
-    elif isinstance(degrees, (list, tuple)):
-        if len(degrees) != 2:
-            raise ValueError("If degrees is a sequence, the length must be 2.")
-    else:
-        raise TypeError("Degrees must be a single non-negative number or a sequence")
-
-    DE_PY_INTER_MODE = {
-        'nearest': Image.NEAREST,
-        'bilinear': Image.BILINEAR,
-        'antialias': Image.ANTIALIAS,
-        'bicubic': Image.BICUBIC
-    }
-    if fill_mode not in ('nearest', 'bilinear', 'antialias', 'bicubic'):
-        raise TypeError('Fill_mode must be in (nearest, bilinear, antialias, bicubic)')
-
-    if isinstance(fill_value, int):
-        fill_value = tuple([fill_value] * 3)
-
-    angle = random.uniform(degrees[0], degrees[1])
-    fill_mode = DE_PY_INTER_MODE[fill_mode]
-    return ToTensor(image.rotate(angle, fill_mode, expand, center, fillcolor=fill_value))
-
-
-def RandomShift(image, shift, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1):
-    '''
-
-    Parameters
-    ----------
-    image
-        Input tensor. Must be 3D.
-    shift:
-        int or list/tuple. If shift is int, the width shift range will equal the height shift range.
-        If shift is a list/tuple, the shift range will be [width fraction, height fraction].
-    is_hwc:
-        The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True).
-    fill_mode:
-        Points outside the boundaries of the input are filled according to the given mode (one of {'constant', 'nearest', 'reflect', 'wrap'}).
-    fill_value:
-        Value used for points outside the boundaries of the input if mode='constant'.
-    interpolation_order
-        int, order of spline interpolation. see ndimage.interpolation.affine_transform
-    Returns
-        Shifted Numpy image tensor.
- ------- - - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - if isinstance(shift, numbers.Number): - width_fraction = shift - height_fraction = shift - elif isinstance(shift, list) or isinstance(shift, tuple): - if len(shift) == 2: - width_fraction = shift[0] - height_fraction = shift[1] - else: - raise ValueError('shift must be int or list/tuple of length 2') - - DE_PY_INTER_MODE = {'nearest': Image.NEAREST, 'bilinear': Image.BILINEAR, 'bicubic': Image.BICUBIC} - if fill_mode not in ('nearest', 'bilinear', 'bicubic'): - raise TypeError('Fill_mode must be in (nearest,bilinear,bicubic)') - fill_mode = DE_PY_INTER_MODE[fill_mode] - width, height = image.size - max_dx = width_fraction * width - max_dy = height_fraction * height - translations = (np.round(random.uniform(-max_dx, max_dx)), np.round(random.uniform(-max_dy, max_dy))) - - scale = 1.0 - shear = 0.0 - output_size = image.size - center = (width * 0.5 + 0.5, height * 0.5 + 0.5) - - angle = math.radians(0) - shear = math.radians(shear) - shear = [shear, 0] - scale = 1.0 / scale - d = math.cos(angle + shear[0]) * math.cos(angle + shear[1]) + \ - math.sin(angle + shear[0]) * math.sin(angle + shear[1]) - matrix = [ - math.cos(angle + shear[0]), - math.sin(angle + shear[0]), 0, -math.sin(angle + shear[1]), - math.cos(angle + shear[1]), 0 - ] - matrix = [scale / d * m for m in matrix] - matrix[2] += matrix[0] * (-center[0] - translations[0]) + matrix[1] * (-center[1] - translations[1]) - matrix[5] += matrix[3] * (-center[0] - translations[0]) + matrix[4] * (-center[1] - translations[1]) - - # Apply center translation: C * RSS^-1 * C^-1 * T^-1 - matrix[2] += center[0] - matrix[5] += center[1] - - if __version__ >= '5': - kwargs = {"fillcolor": fill_value} - else: - kwargs = {} - return ToTensor(image.transform(output_size, Image.AFFINE, matrix, fill_mode, **kwargs)) - - -def RandomShear(image, degree, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1): - ''' - - Parameters - ---------- - image - Input tensor. Must be 3D. - shift: - int or list/tuple, if shift is int, Width shift range will equal to height shift range. - if shift is list/tuple, shift range will be [width fraction, height fraction] - is_hwc: - The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). - fill_mode: - Points outside the boundaries of the input are filled according to the given mode (one of {'constant', 'nearest', 'reflect', 'wrap'}). - fill_value: - Value used for points outside the boundaries of the input if mode='constant'. - interpolation_order - int, order of spline interpolation. see ndimage.interpolation.affine_transform - Returns - Shifted Numpy image tensor. 
- ------- - - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - DE_PY_INTER_MODE = {'nearest': Image.NEAREST, 'bilinear': Image.BILINEAR, 'bicubic': Image.BICUBIC} - if fill_mode not in ('nearest', 'bilinear', 'bicubic'): - raise TypeError('Fill_mode must be in (nearest,bilinear,bicubic)') - fill_mode = DE_PY_INTER_MODE[fill_mode] - width, height = image.size - translations = (0, 0) - scale = 1.0 - shear = degree - output_size = image.size - center = (width * 0.5 + 0.5, height * 0.5 + 0.5) - angle = math.radians(0) - - if shear is not None: - if isinstance(shear, numbers.Number): - shear = (-1 * shear, shear) - shear = [random.uniform(shear[0], shear[1]), random.uniform(shear[0], shear[1])] - elif len(shear) == 2 or len(shear) == 4: - if len(shear) == 2: - shear = [shear[0], shear[1], shear[0], shear[1]] - elif len(shear) == 4: - shear = [s for s in shear] - shear = [random.uniform(shear[0], shear[1]), random.uniform(shear[2], shear[3])] - else: - raise ValueError( - "Shear should be a single value or a tuple/list containing " + "two values. Got {}".format(shear) - ) - shear = [math.radians(s) for s in shear] - else: - shear = [0, 0] - - - d = math.cos(angle + shear[0]) * math.cos(angle + shear[1]) + \ - math.sin(angle + shear[0]) * math.sin(angle + shear[1]) - matrix = [ - math.cos(angle + shear[0]), - math.sin(angle + shear[0]), 0, -math.sin(angle + shear[1]), - math.cos(angle + shear[1]), 0 - ] - matrix = [scale / d * m for m in matrix] - matrix[2] += matrix[0] * (-center[0] - translations[0]) + matrix[1] * (-center[1] - translations[1]) - matrix[5] += matrix[3] * (-center[0] - translations[0]) + matrix[4] * (-center[1] - translations[1]) - - # Apply center translation: C * RSS^-1 * C^-1 * T^-1 - matrix[2] += center[0] - matrix[5] += center[1] - - if __version__ >= '5': - kwargs = {"fillcolor": fill_value} - else: - kwargs = {} - return ToTensor(image.transform(output_size, Image.AFFINE, matrix, fill_mode, **kwargs)) - - -def RandomZoom(image, zoom_range, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1): - ''' - - Parameters - ---------- - image: - Input tensor. Must be 3D. - zoom_range: - Tuple of floats; zoom range for width and height. - is_hwc: - The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). - fill_mode: - Points outside the boundaries of the input are filled according to the given mode (one of {'constant', 'nearest', 'reflect', 'wrap'}). - fill_value: - Value used for points outside the boundaries of the input if mode='constant'. - interpolation_order: - int, order of spline interpolation. see ndimage.interpolation.affine_transform - - Returns - Zoomed Numpy image tensor. 
- ------- - - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - DE_PY_INTER_MODE = {'nearest': Image.NEAREST, 'bilinear': Image.BILINEAR, 'bicubic': Image.BICUBIC} - if isinstance(zoom_range, list) or isinstance(zoom_range, tuple): - if len(zoom_range) == 2: - scale = random.uniform(zoom_range[0], zoom_range[1]) - else: - raise ValueError('The length of zoom_range must be 2') - else: - raise ValueError( - "Zoom_range should be a single value or a tuple/list containing " + "two values. Got {}".format(zoom_range) - ) - if fill_mode not in ('nearest', 'bilinear', 'bicubic'): - raise TypeError('Fill_mode must be in (nearest,bilinear,bicubic)') - fill_mode = DE_PY_INTER_MODE[fill_mode] - width, height = image.size - translations = (0, 0) - shear = (0, 0) - output_size = image.size - center = (width * 0.5 + 0.5, height * 0.5 + 0.5) - angle = math.radians(0) - - d = math.cos(angle + shear[0]) * math.cos(angle + shear[1]) + \ - math.sin(angle + shear[0]) * math.sin(angle + shear[1]) - matrix = [ - math.cos(angle + shear[0]), - math.sin(angle + shear[0]), 0, -math.sin(angle + shear[1]), - math.cos(angle + shear[1]), 0 - ] - matrix = [scale / d * m for m in matrix] - matrix[2] += matrix[0] * (-center[0] - translations[0]) + matrix[1] * (-center[1] - translations[1]) - matrix[5] += matrix[3] * (-center[0] - translations[0]) + matrix[4] * (-center[1] - translations[1]) - - # Apply center translation: C * RSS^-1 * C^-1 * T^-1 - matrix[2] += center[0] - matrix[5] += center[1] - - if __version__ >= '5': - kwargs = {"fillcolor": fill_value} - else: - kwargs = {} - return ToTensor(image.transform(output_size, Image.AFFINE, matrix, fill_mode, **kwargs)) - - -def Rescale(image, scale, offset=0): - ''' - - Parameters - ---------- - image: - 3-D image or 4-D images - scale: - Float, the scale to apply to the inputs. - offset: - Float, the offset to apply to the inputs. - Returns: - rescaled images - ------- - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - - return ToTensor(image * scale + offset) - - -def RandomFlipVertical(image, prob=0.5): - - if prob > random.random(): - image = FlipVertical(image) - return image - - -def RandomFlipHorizontal(image, prob=0.5): - - if prob > random.random(): - image = FlipHorizontal(image) - return image - - -def HWC2CHW(image): - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. 
Got {}'.format(type(image))) - - image_shape = image.shape - if (len(image_shape) == 3): - return Transpose(image, (2, 0, 1)) - elif (len(image_shape) == 4): - return Transpose(image, (0, 3, 1, 2)) - else: - raise ValueError('\'image\' must have either 3 or 4 dimensions.') - - -def CHW2HWC(image): - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - - image_shape = image.shape - if (len(image_shape) == 3): - return Transpose(image, (1, 2, 0)) - elif (len(image_shape) == 4): - return Transpose(image, (0, 2, 3, 1)) - else: - raise ValueError('\'image\' must have either 3 or 4 dimensions.') diff --git a/tensorlayer/dataflow/image/paddle_image.py b/tensorlayer/dataflow/image/paddle_image.py deleted file mode 100644 index b33ef1503..000000000 --- a/tensorlayer/dataflow/image/paddle_image.py +++ /dev/null @@ -1,19 +0,0 @@ -import paddle -import numpy as np -from PIL import Image -from paddle.vision.transforms import functional as F - -__all_ = [ - 'Standardization', -] - - -def Standardization(img, mean, std, data_format='HWC'): - - if data_format == 'CHW': - mean = paddle.to_tensor(mean).reshape([-1, 1, 1]) - std = paddle.to_tensor(std).reshape([-1, 1, 1]) - else: - mean = paddle.to_tensor(mean) - std = paddle.to_tensor(std) - return (img - mean) / std diff --git a/tensorlayer/dataflow/image/tensorflow_image.py b/tensorlayer/dataflow/image/tensorflow_image.py deleted file mode 100644 index ca0ce41a3..000000000 --- a/tensorlayer/dataflow/image/tensorflow_image.py +++ /dev/null @@ -1,760 +0,0 @@ -import tensorflow as tf -import numpy as np -from tensorflow.python.ops import math_ops -from tensorflow.python.ops import array_ops -from tensorflow.python.framework import ops -from tensorflow.python.ops.image_ops_impl import _AssertAtLeast3DImage -from tensorflow.python.framework import dtypes -from tensorflow.python.ops.image_ops_impl import convert_image_dtype -import numbers - -__all__ = [ - 'CentralCrop', - 'HsvToRgb', - 'AdjustBrightness', - 'AdjustContrast', - 'AdjustHue', - 'AdjustSaturation', - 'Crop', - 'FlipHorizontal', - 'FlipVertical', - 'GrayToRgb', - 'Standardization', - 'RgbToGray', - 'PadToBoundingbox', - 'Pad', - 'RandomBrightness', - 'RandomContrast', - 'RandomHue', - 'RandomSaturation', - 'RandomCrop', - 'Resize', - 'CropAndResize', - 'CropOrPad', - 'ResizeAndPad', - 'RgbToHsv', - 'Transpose', - 'RandomRotation', - 'RandomShift', - 'RandomShear', - 'RandomZoom', - 'Rescale', - 'RandomFlipVertical', - 'RandomFlipHorizontal', - 'HWC2CHW', - 'CHW2HWC', -] - - -def CentralCrop(image, central_fraction=None, size=None): - ''' - - Parameters - ---------- - image : - input Either a 3-D float Tensor of shape [height, width, depth], - or a 4-D Tensor of shape [batch_size, height, width, depth]. - central_fraction : - float (0, 1], fraction of size to crop - size: - size (Union[int, sequence]) – The output size of the cropped image. If size is an integer, a square crop of size (size, size) is returned. - If size is a sequence of length 2, it should be (height, width). - Returns : - 3-D / 4-D float Tensor, as per the input. - ------- - If backend is tensorflow, central_fraction will be used preferentially. if size is used,the height-width ratio will be equivalent to original ratio.. 
- If backend is mindspore, size will be used preferentially. - ''' - if size is None and central_fraction is None: - raise ValueError('central_fraction and size can not be both None') - - if central_fraction is None: - outshape = np.shape(image) - if len(outshape) == 3: - h_axis = 0 - w_axis = 1 - elif len(outshape) == 4: - h_axis = 1 - w_axis = 2 - - if isinstance(size, numbers.Number): - target_height = size - target_width = size - elif isinstance(size, tuple) or isinstance(size, list): - if len(size) == 2: - target_height = size[0] - target_width = size[1] - else: - raise ValueError('The length of size must be 2') - else: - raise ValueError("Size should be a single integer or a list/tuple (h, w) of length 2.") - if target_height > outshape[h_axis] or target_width > outshape[w_axis]: - raise ValueError("Centralcrop image size must < original image size.") - central_fraction = max(target_height / outshape[h_axis], target_width / outshape[w_axis]) - else: - if central_fraction > 1 or central_fraction <= 0: - raise ValueError('central_fraction must be in (0,1].') - - return tf.image.central_crop(image, central_fraction) - - -def HsvToRgb(image): - - return tf.image.hsv_to_rgb(image) - - -def AdjustBrightness(image, factor): - - return tf.image.adjust_brightness(image, delta=factor) - - -def AdjustContrast(image, factor): - - return tf.image.adjust_contrast(image, contrast_factor=factor) - - -def AdjustHue(image, factor): - - return tf.image.adjust_hue(image, delta=factor) - - -def AdjustSaturation(image, factor): - - return tf.image.adjust_saturation(image, saturation_factor=factor) - - -def Crop(image, offset_height, offset_width, target_height, target_width, is_hwc=True): - ''' - - Parameters - ---------- - image: - A image or a batch of images - offset_height: - Vertical coordinate of the top-left corner of the result in the input. - offset_width: - Horizontal coordinate of the top-left corner of the result in the input. - target_height: - Height of the result. - target_width: - Width of the result. - - Returns: - Output [batch, target_height, target_width, channels] or [target_height, target_width, channels] - ------- - ''' - - return tf.image.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width) - - -def FlipHorizontal(image): - - return tf.image.flip_left_right(image) - - -def FlipVertical(image): - - return tf.image.flip_up_down(image) - - -def GrayToRgb(image): - - return tf.image.grayscale_to_rgb(image) - - -def RgbToGray(image): - - return tf.image.rgb_to_grayscale(image) - - -def PadToBoundingbox(image, offset_height, offset_width, target_height, target_width, padding_value=0, is_hwc=True): - - return tf.image.pad_to_bounding_box( - image, - offset_height, - offset_width, - target_height, - target_width, - ) - - -def Pad(image, padding, padding_value=0, mode='constant'): - ''' - - Parameters - ---------- - image: - A 3-D or 4-D Tensor. - padding: - An integer or a list/tuple. If a single number is provided, pad all borders with this value. - If a tuple or list of 2 values is provided, pad the left and top with the first value and the right and bottom with the second value. - If 4 values are provided as a list or tuple, pad the (top, bottom, left, right) respectively. - padding_value: - In "CONSTANT" mode, the scalar pad value to use. Must be same type as tensor. - mode: - One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive) - Returns: - A padded Tensor. Has the same type as tensor. 
- ------- - - ''' - image_shape = image.shape - if len(image_shape) == 3: - batch_size = 0 - elif len(image_shape) == 4: - batch_size = image_shape[0] - else: - raise TypeError('Image must be a 3-D tensor or 4-D tensor.') - - if isinstance(padding, int): - padding = ((padding, padding), (padding, padding)) - elif isinstance(padding, list) or isinstance(padding, tuple): - if len(padding) == 2: - padding = ((padding[0], padding[0]), (padding[1], padding[1])) - elif len(padding) == 4: - padding = ((padding[0], padding[1]), (padding[2], padding[3])) - else: - raise ValueError('The length of padding should be 2 or 4, but got {}.'.format(len(padding))) - else: - raise TypeError('Padding should be an integer or a list/tuple, but got {}.'.format(type(padding))) - if batch_size == 0: - padding = (padding[0], padding[1], (0, 0)) - else: - padding = ((0, 0), padding[0], padding[1], (0, 0)) - - return tf.pad(image, padding, mode=mode, constant_values=padding_value) - - -def Standardization(image, mean=None, std=None, channel_mode=False): - ''' - - Parameters - ---------- - image: - An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image. - mean: - List or tuple of mean values for each channel, with respect to channel order. - std: - List or tuple of standard deviations for each channel. - channel_mode: - Decide to implement standardization on whole image or each channel of image. - Returns: - A Tensor with the same shape and dtype as image. - ------- - ''' - image = tf.cast(image, tf.float32) - with ops.name_scope(None, 'Standardization', [image]) as scope: - image = ops.convert_to_tensor(image, name='image') - image = _AssertAtLeast3DImage(image) - - orig_dtype = image.dtype - if orig_dtype not in [dtypes.float16, dtypes.float32]: - image = convert_image_dtype(image, dtypes.float32) - - if mean is not None and std is not None: - mean = np.array(mean, dtype=np.float32) - std = np.array(std, dtype=np.float32) - image -= mean - image = math_ops.divide(image, std, name=scope) - return convert_image_dtype(image, orig_dtype, saturate=True) - - elif mean is None and std is None: - if channel_mode: - num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:-1]) - #`num_pixels` is the number of elements in each channels of 'image' - image_mean = math_ops.reduce_mean(image, axis=[-2, -3], keepdims=True) - # `image_mean` is the mean of elements in each channels of 'image' - - stddev = math_ops.reduce_std(image, axis=[-2, -3], keepdims=True) - min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, image.dtype)) - adjusted_sttdev = math_ops.maximum(stddev, min_stddev) - - image -= image_mean - image = math_ops.divide(image, adjusted_sttdev, name=scope) - return convert_image_dtype(image, orig_dtype, saturate=True) - - else: - num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:]) - #`num_pixels` is the number of elements in `image` - image_mean = math_ops.reduce_mean(image, axis=[-1, -2, -3], keepdims=True) - - # Apply a minimum normalization that protects us against uniform images. 
- stddev = math_ops.reduce_std(image, axis=[-1, -2, -3], keepdims=True) - min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, image.dtype)) - adjusted_stddev = math_ops.maximum(stddev, min_stddev) - - image -= image_mean - image = math_ops.divide(image, adjusted_stddev, name=scope) - return convert_image_dtype(image, orig_dtype, saturate=True) - else: - raise ValueError('std and mean must both be None or not None') - - -def RandomBrightness(image, factor): - ''' - - Parameters - ---------- - image: - An image or images to adjust - factor: - Float, must be non-negative. Factor must be (0,1). Random range will be [-factor, factor). - Returns: - The brightness-adjusted image(s). - ------- - - ''' - - return tf.image.random_brightness(image, factor) - - -def RandomContrast(image, lower, upper, seed=None): - ''' - - Parameters - ---------- - image: - An image tensor with 3 or more dimensions. - lower: - float. Lower bound for the random contrast factor. - upper: - float. Upper bound for the random contrast factor. - seed: - A Python integer. Used to create a random seed. - - Returns: - The contrast-adjusted image(s). - ------- - ''' - - return tf.image.random_contrast(image, lower, upper, seed) - - -def RandomHue(image, factor, seed=None): - ''' - - Parameters - ---------- - image: - RGB image or images. The size of the last dimension must be 3. - factor: - float. The maximum value for the random factor. - seed: - An operation-specific seed. - - Returns: - Adjusted image(s), same shape and DType as `image`. - ------- - - ''' - - return tf.image.random_hue(image, factor, seed) - - -def RandomSaturation(image, lower, upper, seed=None): - ''' - Parameters - ---------- - image: - RGB image or images. The size of the last dimension must be 3. - lower: - float. Lower bound for the random saturation factor. - upper: - float. Upper bound for the random saturation factor. - seed: - An operation-specific seed. - - Returns: - Adjusted image(s), same shape and DType as `image`. - ------- - ''' - - return tf.image.random_saturation(image, lower, upper, seed) - - -def RandomCrop(image, size): - ''' - - Parameters - ---------- - image: - Input an image to crop. - size: - a list or tuple. if size is an integer, shape of cropped image will be [size, size, 3]. if length of size is 2. - shape of cropped image will be [height, width, 3]. - Returns: - A cropped image of the same rank as image and shape size. - ------- - ''' - - if isinstance(size, int): - crop_size = (size, size) - elif isinstance(size, (list, tuple)) and len(size) == 2: - crop_size = (size[0], size[1]) - else: - raise ValueError("Size should be a single integer or a list/tuple (h, w) of length 2.") - - if len(image.shape) == 3: - h, w, c = image.shape - crop_size = crop_size + (c, ) - elif len(image.shape) == 4: - b, h, w, c = image.shape - crop_size = (b, ) + crop_size + (c, ) - - return tf.image.random_crop(image, size=crop_size) - - -def Resize(image, size, method='bilinear', preserve_aspect_ratio=False, antialias=False): - ''' - - Parameters - ---------- - images: - Input an image to resize - size: - if size is an integer, shape of resized image will be [size, size, 3]. if length of size is 2. - shape of resized image will be [height, width, 3]. - method: - An image.ResizeMethod, or string equivalent shoulid be in - (bilinear, lanczos3, lanczos5, bicubic, gaussian, nearest, area, mitchellcubic). - Defaults to bilinear. - preserve_aspect_ratio: - Whether to preserve the aspect ratio. 
-    antialias:
-        Whether to use an anti-aliasing filter when downsampling an image.
-    Returns:
-        a resized image
-    -------
-
-    '''
-    if isinstance(size, int):
-        size = [size, size]
-    elif len(size) != 2:
-        raise ValueError('Size should be a single integer or a list/tuple (h, w) of length 2.')
-
-    return tf.image.resize(image, size, method, preserve_aspect_ratio, antialias)
-
-
-def CropAndResize(image, boxes, box_indices, crop_size, method='bilinear', extrapolation_value=0, is_hwc=True):
-    '''
-
-    Parameters
-    ----------
-    image:
-        A 4-D tensor of shape [batch, image_height, image_width, depth]. Both image_height and image_width need to be positive.
-    boxes:
-        A 2-D tensor of shape [num_boxes, 4].
-    box_indices:
-        A 1-D tensor of shape [num_boxes] with int32 values in [0, batch).
-        The value of box_ind[i] specifies the image that the i-th box refers to.
-    crop_size:
-        A 1-D tensor of 2 elements, size = [crop_height, crop_width]. All cropped image patches are resized to this size.
-        The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be positive.
-    method:
-        An optional string specifying the sampling method for resizing.
-        It can be either "bilinear" or "nearest" and defaults to "bilinear".
-    extrapolation_value:
-        An optional float. Defaults to 0. Value used for extrapolation, when applicable.
-    Returns:
-        A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth].
-    -------
-
-    '''
-    image_shape = image.shape
-    boxes_num = 0
-    if isinstance(boxes, tf.Tensor):
-        boxes_num = boxes.shape[0]
-    elif isinstance(boxes, (np.ndarray, list, tuple)):
-        boxes = tf.constant(boxes)
-        boxes_num = boxes.shape[0]
-
-    if isinstance(crop_size, int):
-        crop_size = (crop_size, crop_size)
-        crop_size = tf.constant(crop_size)
-    elif isinstance(crop_size, (np.ndarray, list, tuple)):
-        crop_size = tf.constant(crop_size)
-
-    if isinstance(box_indices, (np.ndarray, list, tuple)):
-        box_indices = tf.constant(box_indices)
-    # If the input is a single image, a 3-D Tensor of shape [image_height, image_width, depth]
-    # is expanded with 'tf.expand_dims(image, axis=0)' into a 4-D Tensor of shape
-    # [batch_size, image_height, image_width, depth], and every box then refers to image 0.
-    if len(image_shape) == 3:
-        image = tf.expand_dims(image, axis=0)
-        box_indices = np.zeros((boxes_num), dtype=np.int)
-        box_indices = tf.constant(box_indices)
-
-    return tf.image.crop_and_resize(
-        image, boxes=boxes, box_indices=box_indices, crop_size=crop_size, method=method,
-        extrapolation_value=extrapolation_value
-    )
-
-
-def CropOrPad(image, target_height, target_width, is_hwc=True):
-    '''
-    Resizes an image to a target width and height by either centrally cropping the image or padding it evenly with zeros.
-    Parameters
-    ----------
-    image:
-        4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels].
-    target_height:
-        Target height.
-    target_width:
-        Target width.
-    Returns:
-        Cropped and/or padded image.
-    -------
-    '''
-
-    return tf.image.resize_with_crop_or_pad(image, target_height, target_width)
-
-
-def ResizeAndPad(image, target_height, target_width, method='bilinear', antialias=False, is_hwc=True):
-    '''
-
-    Parameters
-    ----------
-    image:
-        4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels].
-    target_height:
-        Target height.
-    target_width:
-        Target width.
- is_hwc: - The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). - Returns: - Resized and padded image. If images was 4-D, a 4-D float Tensor of shape [batch, new_height, new_width, channels]. - If images was 3-D, a 3-D float Tensor of shape [new_height, new_width, channels]. - ------- - - ''' - - return tf.image.resize_with_pad(image, target_height, target_width, method=method, antialias=antialias) - - -def RgbToHsv(image): - - return tf.image.rgb_to_hsv(image) - - -def Transpose(image, order): - image = ops.convert_to_tensor(image) - image = _AssertAtLeast3DImage(image) - shape = image.get_shape() - if shape.ndims == 3 or shape.ndims is None: - if len(order) != 3: - raise ValueError('if image is 3-D tensor, order should be a list/tuple with length of 3') - return array_ops.transpose(image, order) - elif shape.ndims == 4: - if len(order) != 4: - raise ValueError('if image is 4-D tensor, order should be a list/tuple with length of 4') - return array_ops.transpose(image, order) - else: - raise ValueError('\'image\' must have either 3 or 4 dimensions.') - - -def RandomRotation( - image, degrees, fill_mode='nearest', fill_value=0, center=None, expand=False, is_hwc=True, interpolation_order=1 -): - if isinstance(image, tf.Tensor): - image = np.asarray(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - if is_hwc: - h, w, c = 0, 1, 2 - else: - h, w, c = 1, 2, 0 - if fill_mode not in ('constant', 'nearest', 'reflect', 'wrap'): - raise TypeError('fill_mode must be in (constant, nearest, reflect, wrap)') - - image = tf.keras.preprocessing.image.random_rotation( - image, degrees, h, w, c, fill_mode, fill_value, interpolation_order - ) - return tf.convert_to_tensor(image) - - -def RandomShift(image, shift, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1): - ''' - - Parameters - ---------- - image - Input tensor. Must be 3D. - shift: - int or list/tuple, if shift is int, Width shift range will equal to height shift range. - if shift is list/tuple, shift range will be [width fraction, height fraction] - is_hwc: - The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). - fill_mode: - Points outside the boundaries of the input are filled according to the given mode (one of {'constant', 'nearest', 'reflect', 'wrap'}). - fill_value: - Value used for points outside the boundaries of the input if mode='constant'. - interpolation_order - int, order of spline interpolation. see ndimage.interpolation.affine_transform - Returns - Shifted Numpy image tensor. - ------- - - ''' - if isinstance(image, tf.Tensor): - image = np.asarray(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. 
Got {}'.format(type(image))) - if isinstance(shift, numbers.Number): - width_fraction = shift - height_fraction = shift - elif isinstance(shift, list) or isinstance(shift, tuple): - if len(shift) == 2: - width_fraction = shift[0] - height_fraction = shift[1] - else: - raise ValueError('shift must be number or list/tuple of length 2') - - if is_hwc: - h, w, c = 0, 1, 2 - else: - h, w, c = 1, 2, 0 - if fill_mode not in ('constant', 'nearest', 'reflect', 'wrap'): - raise TypeError('fill_mode must be in (constant, nearest, reflect, wrap)') - - image = tf.keras.preprocessing.image.random_shift( - image, wrg=width_fraction, hrg=height_fraction, row_axis=h, col_axis=w, channel_axis=c, fill_mode=fill_mode, - cval=fill_value, interpolation_order=interpolation_order - ) - - return tf.convert_to_tensor(image) - - -def RandomShear(image, degree, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1): - ''' - - Parameters - ---------- - image - Input tensor. Must be 3D. - degree: - Transformation intensity in degrees. - is_hwc: - The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). - fill_mode: - Points outside the boundaries of the input are filled according to the given mode (one of {'constant', 'nearest', 'reflect', 'wrap'}). - fill_value: - Value used for points outside the boundaries of the input if mode='constant'. - interpolation_order - int, order of spline interpolation. see ndimage.interpolation.affine_transform - Returns - Shifted Numpy image tensor. - ------- - - ''' - if isinstance(image, tf.Tensor): - image = np.asarray(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - if is_hwc: - h, w, c = 0, 1, 2 - else: - h, w, c = 1, 2, 0 - - image = tf.keras.preprocessing.image.random_shear( - image, intensity=degree, row_axis=h, col_axis=w, channel_axis=c, fill_mode=fill_mode, cval=fill_value, - interpolation_order=interpolation_order - ) - return tf.convert_to_tensor(image) - - -def RandomZoom(image, zoom_range, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1): - ''' - - Parameters - ---------- - image: - Input tensor. Must be 3D. - zoom_range: - Tuple of floats; zoom range for width and height. - is_hwc: - The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). - fill_mode: - Points outside the boundaries of the input are filled according to the given mode (one of {'constant', 'nearest', 'reflect', 'wrap'}). - fill_value: - Value used for points outside the boundaries of the input if mode='constant'. - interpolation_order: - int, order of spline interpolation. see ndimage.interpolation.affine_transform - - Returns - Zoomed Numpy image tensor. - ------- - - ''' - if isinstance(image, tf.Tensor): - image = np.asarray(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. 
Got {}'.format(type(image))) - if isinstance(zoom_range, numbers.Number): - zoom_range = (zoom_range, zoom_range) - elif isinstance(zoom_range, list) or isinstance(zoom_range, tuple): - if len(zoom_range) == 2: - zoom_range = (zoom_range[0], zoom_range[1]) - else: - raise ValueError('shift must be number or list/tuple of length 2') - if is_hwc: - h, w, c = 0, 1, 2 - else: - h, w, c = 1, 2, 0 - - image = tf.keras.preprocessing.image.random_zoom( - image, zoom_range=zoom_range, row_axis=h, col_axis=w, channel_axis=c, fill_mode=fill_mode, cval=fill_value, - interpolation_order=interpolation_order - ) - return tf.convert_to_tensor(image) - - -def Rescale(image, scale, offset=0): - ''' - - Parameters - ---------- - image: - 3-D image or 4-D images - scale: - Float, the scale to apply to the inputs. - offset: - Float, the offset to apply to the inputs. - Returns: - rescaled images - ------- - ''' - image = tf.cast(image, dtype=tf.float32) - scale = tf.cast(scale, dtype=tf.float32) - offset = tf.cast(offset, dtype=tf.float32) - return image * scale + offset - - -def RandomFlipVertical(image): - - return tf.image.random_flip_up_down(image) - - -def RandomFlipHorizontal(image): - - return tf.image.random_flip_left_right(image) - - -def HWC2CHW(image): - - if (len(image.shape) == 3): - return Transpose(image, (2, 0, 1)) - elif (len(image.shape) == 4): - return Transpose(image, (0, 3, 1, 2)) - else: - raise ValueError('\'image\' must have either 3 or 4 dimensions.') - - -def CHW2HWC(image): - - if (len(image.shape) == 3): - return Transpose(image, (1, 2, 0)) - elif (len(image.shape) == 4): - return Transpose(image, (0, 2, 3, 1)) - else: - raise ValueError('\'image\' must have either 3 or 4 dimensions.') diff --git a/tensorlayer/dataflow/mindspore_data.py b/tensorlayer/dataflow/mindspore_data.py deleted file mode 100644 index 54e275f9e..000000000 --- a/tensorlayer/dataflow/mindspore_data.py +++ /dev/null @@ -1,287 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import mindspore.dataset as ds -import mindspore as ms -from enum import Enum -__all__ = [ - 'Apply', - 'Batch', - 'Concat', - 'CsvDataset', - 'Filter', - 'Flat_map', - 'FromGenerator', - 'FromSlices', - 'Map', - 'Prefetch', - 'Repeat', - 'Shuffle', - 'Skip', - 'Take', - 'TextFlieDataset', - 'TFRecordDataset', - 'Dataloader', -] - - -class Shuffle(str, Enum): - GLOBAL: str = "global" - FILES: str = "file" - - -def Apply(dataset, transformation_func): - - return dataset.apply(transformation_func) - - -def Batch( - dataset, batch_size, drop_remainder=False, num_parallel_workers=None, per_batch_map=None, inut_columns=None, - output_columns=None, column_order=None, pad_info=None -): - ''' - Combine batch_size number of consecutive rows into batches. 
- Parameters - ---------- - dataset - batch_size - drop_remainder - num_parallel_workers - per_batch_map - inut_columns - output_columns - column_order - pad_info - - Returns - ------- - - ''' - return dataset.batch( - batch_size=batch_size, drop_remainder=drop_remainder, num_parallel_workers=num_parallel_workers, - per_batch_map=per_batch_map, input_columns=inut_columns, output_columns=output_columns, - column_order=column_order, pad_info=pad_info - ) - - -def Concat(dataset_1, dataset_2): - - return dataset_1.concat(dataset_2) - - -def CsvDataset( - file_pattern, batch_size=1, column_names=None, column_defaults=None, label_name=None, select_columns=None, - field_delim=',', use_quote_delim=True, na_value='', header=True, num_epochs=None, shuffle=Shuffle.GLOBAL, - shuffle_buffer_size=10000, shuffle_seed=None, prefetch_buffer_size=None, num_parallel_reads=None, sloppy=False, - num_rows_for_inference=100, compression_type=None, ignore_errors=False, numples_samples=None, num_shards=None, - shard_id=None, cache=None -): - """ - A source dataset that reads and parses comma-separated values (CSV) datasets. - - Examples: - >>> import mindspore.dataset as dataset - >>> - >>> dataset_files = ["/path/to/1", "/path/to/2"] # contains 1 or multiple text files - >>> dataset = dataset.CSVDataset(dataset_files=dataset_files, column_names=['col1', 'col2', 'col3', 'col4']) - """ - return ds.CSVDataset( - dataset_files=file_pattern, field_delim=field_delim, column_defaults=column_defaults, column_names=column_names, - num_samples=numples_samples, num_parallel_workers=num_parallel_reads, shuffle=shuffle, num_shards=num_shards, - shard_id=shard_id, cache=cache - ) - - -def Filter(dataset, predicate): - - return dataset.filter(predicate) - - -def Flat_map(dataset, map_func): - - return dataset.flat_map(map_func) - - -def FromGenerator( - generator, output_types, output_shapes=None, args=None, column_names=None, column_types=None, schema=None, - num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None, shard_id=None, - python_multiprocessing=True -): - - return ds.GeneratorDataset( - source=generator, column_names=column_names, column_types=column_types, schema=schema, num_samples=num_samples, - num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler, num_shards=num_shards, - shard_id=shard_id, python_multiprocessing=python_multiprocessing - ) - - -def FromSlices( - tensor, column_names=None, num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None, - shard_id=None -): - - return ds.NumpySlicesDataset( - data=tensor, column_names=column_names, num_samples=num_samples, num_parallel_workers=num_parallel_workers, - shuffle=shuffle, sampler=sampler, num_shards=num_shards, shard_id=shard_id - ) - - -def Map( - dataset, map_func, num_parallel_calls=None, input_columns=None, output_columns=None, column_order=None, - num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None -): - """ Maps map_func across the elements of this dataset. - - Parameters - ---------- - dataset : DataFlow - input DataFlow - map_func : function - A function mapping a dataset element to another dataset element. 
- num_parallel_calls - - Returns - ------- - - """ - return dataset.map( - operations=map_func, input_columns=input_columns, output_columns=output_columns, column_order=column_order, - num_parallel_workers=num_parallel_workers, python_multiprocessing=python_multiprocessing, cache=cache, - callbacks=callbacks - ) - - -def Prefetch(dataset, buffer_size): - - batch_size = dataset.get_batch_size() - prefetch_size = batch_size * buffer_size - - return dataset.config.set_prefetch_size(prefetch_size) - - -def Repeat(dataset, count=None): - - return dataset.repeat(count) - - -def Shuffle(dataset, buffer_size, seed=None, reshuffle_each_iteration=None): - - #dataset.config.set_seed(seed) - - return dataset.shuffle(buffer_size) - - -def Skip(dataset, count): - ''' - Creates a Dataset that skips count elements from this dataset. - Parameters - ---------- - dataset: - A dataset - count: - A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be skipped to form the new dataset. - - - Returns - ------- - - ''' - return dataset.skip(count) - - -def Take(dataset, count): - ''' - Creates a Dataset with at most count elements from this dataset. - Parameters - ---------- - dataset: - A dataset - count: - A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be taken to form the new dataset. - If count is -1, or if count is greater than the size of this dataset, the new dataset will contain all elements of this dataset. - Returns - ------- - - ''' - return dataset.take(count) - - -def TextFlieDataset( - filenames, compression_type=None, buffer_size=None, num_parallel_reads=None, num_samples=None, shuffle=None, - num_shards=None, shard_id=None, cache=None -): - """ - A source dataset that reads and parses datasets stored on disk in text format. - The generated dataset has one column ['text']. - - Examples: - >>> import mindspore.dataset as dataset - >>> - >>> dataset_files = ["/path/to/1", "/path/to/2"] # contains 1 or multiple text files - >>> dataset = dataset.TextFileDataset(dataset_files=dataset_files) - """ - if shuffle is None: - shuffle = Shuffle.GLOBAL - return ds.TextFileDataset( - dataset_files=filenames, num_samples=num_samples, num_parallel_workers=num_parallel_reads, shuffle=shuffle, - num_shards=num_shards, shard_id=shard_id, cache=cache - ) - - -def TFRecordDataset( - filenames, compression_type=None, buffer_size=None, num_parallel_reads=None, schema=None, columns_list=None, - num_samples=None, shuffle=None, num_shards=None, shard_id=None, shard_equal_rows=False, cache=None -): - """ - A source dataset that reads and parses datasets stored on disk in TFData format. - - Examples: - >>> import mindspore.dataset as dataset - >>> import mindspore.common.dtype as mstype - >>> - >>> dataset_files = ["/path/to/1", "/path/to/2"] # contains 1 or multiple tf data files - >>> - >>> # 1) Get all rows from dataset_files with no explicit schema - >>> # The meta-data in the first row will be used as a schema. 
- >>> tfdataset = dataset.TFRecordDataset(dataset_files=dataset_files) - >>> - >>> # 2) Get all rows from dataset_files with user-defined schema - >>> schema = dataset.Schema() - >>> schema.add_column('col_1d', de_type=mindspore.int64, shape=[2]) - >>> tfdataset = dataset.TFRecordDataset(dataset_files=dataset_files, schema=schema) - >>> - >>> # 3) Get all rows from dataset_files with schema file "./schema.json" - >>> tfdataset = dataset.TFRecordDataset(dataset_files=dataset_files, schema="./schema.json") - """ - if shuffle is None: - shuffle = Shuffle.GLOBAL - return ds.TFRecordDataset( - dataset_files=filenames, schema=schema, columns_list=columns_list, num_samples=num_samples, - num_parallel_workers=num_parallel_reads, shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, - shard_equal_rows=shard_equal_rows, cache=cache - ) - - -def Zip(datasets): - ''' - Creates a Dataset by zipping together the given datasets. - Parameters - ---------- - datasets: - A tuple of datasets to be zipped together. - Returns - ------- - - ''' - return ds.zip(datasets) - - -def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, prefetch=0, shuffle_buffer_size=0): - - if shuffle: - dataset = Shuffle(dataset, buffer_size=shuffle_buffer_size) - - dataset = Batch(dataset, batch_size=batch_size, drop_remainder=drop_last) - dataset = Prefetch(dataset, buffer_size=prefetch) - - return dataset diff --git a/tensorlayer/dataflow/paddle_data.py b/tensorlayer/dataflow/paddle_data.py deleted file mode 100644 index 04d4b3327..000000000 --- a/tensorlayer/dataflow/paddle_data.py +++ /dev/null @@ -1,131 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import numpy as np -import paddle -from paddle.io import Dataset, BatchSampler, DataLoader, IterableDataset -__all__ = [ - 'Concat', - 'FromGenerator', - 'FromSlices', - 'Map', - # 'Shuffle', - # 'Batch', - 'Dataloader', -] - - -def to_list(value): - if value is None: - return value - if isinstance(value, (list, tuple)): - return list(value) - return [value] - - -class FromGenerator(Dataset): - - def __init__(self, generator): - - if not callable(generator): - raise TypeError("'generator' must be callable") - self.generator = generator() - self.datas = [] - self.labels = [] - for data, label in self.generator: - self.datas.append(data) - self.labels.append(label) - - def __getitem__(self, idx): - - x = self.datas[idx] - y = self.labels[idx] - - return x, y - - def __len__(self): - - return self.datas.shape[0] - - -class FromSlices(Dataset): - - def __init__(self, datas, transform=None): - self.datas = datas[0] - self.labels = datas[1] - self.transform = transform - - if len(self.datas) != len(self.labels): - raise ValueError('Datas and labels not have same shape of the 1st dimension.') - - def __getitem__(self, idx): - data = paddle.to_tensor(self.datas[idx], dtype='float32') - label = paddle.to_tensor(self.labels[idx], dtype='int64') - if self.transform is not None: - data = self.transform(data) - return data, label - - def __len__(self): - - return len(self.datas) - - -class Concat(IterableDataset): - - def __init__(self, datasets): - self.datasets = list(datasets) - assert len(self.datasets) > 0, "input datasets shoule not be empty" - for i, dataset in enumerate(self.datasets): - assert isinstance(dataset, IterableDataset), \ - "ChainDataset only support paddle.io.IterableDataset" - - def __iter__(self): - for dataset in self.datasets: - for sample in dataset: - yield sample - - -class Map(Dataset): - - def __init__(self, dataset, transform): - 
self.isDataset = False - self.transform = transform - if isinstance(dataset, Dataset): - self.isDataset = True - self.dataset = dataset - elif isinstance(dataset, list) or isinstance(dataset, tuple): - self.datas = dataset[0] - self.labels = dataset[1] - else: - raise TypeError( - " 'dataset' should be subclass instance of paddle.io.Dataset " - "or a [data, label] list/tulpe, not a {}".format(type(dataset)) - ) - - def __getitem__(self, idx): - if self.isDataset: - x = self.dataset[idx][0] - if not isinstance(x, np.ndarray): - x = np.asarray(x) - x = self.transform(x) - y = self.dataset[idx][1] - else: - x = self.datas[idx] - if not isinstance(x, np.ndarray): - x = np.asarray(x) - x = self.transform(x) - y = self.labels[idx] - - return x, y - - def __len__(self): - - if self.isDataset: - return len(self.dataset[0]) - else: - return len(self.datas) - - -def Dataloader(dataset, batch_size=None, shuffle=False, drop_last=False, prefetch=0, shuffle_buffer_size=0): - - return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last) diff --git a/tensorlayer/dataflow/tensorflow_data.py b/tensorlayer/dataflow/tensorflow_data.py deleted file mode 100644 index 4da229a43..000000000 --- a/tensorlayer/dataflow/tensorflow_data.py +++ /dev/null @@ -1,266 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import tensorflow as tf - -__all__ = [ - 'Apply', - 'Batch', - 'Concat', - 'CsvDataset', - 'Filter', - 'Flat_map', - 'FromGenerator', - 'FromSlices', - 'Map', - 'Prefetch', - 'Repeat', - 'Shuffle', - 'Skip', - 'Take', - 'TextFlieDataset', - 'TFRecordDataset', - 'Zip', - 'Dataloader', -] - - -def Apply(dataset, transformation_func): - """Applies a transformation function to this dataset. - `apply` enables chaining of custom `Dataset` transformations, which are - represented as functions that take one `Dataset` argument and return a - transformed `Dataset`. - >>> dataset = tf.data.Dataset.range(100) - >>> def dataset_fn(dataset): - ... return dataset.filter(lambda x: x < 5) - >>> dataset = dataset.apply(dataset_fn) - >>> list(dataset.as_numpy_iterator()) - [0, 1, 2, 3, 4] - Args: - transformation_func: A function that takes one `Dataset` argument and - returns a `Dataset`. - Returns: - Dataset: The `Dataset` returned by applying `transformation_func` to this - dataset. - """ - return dataset.apply(transformation_func) - - -def Batch(dataset, batch_size, drop_remainder=False): - ''' - - Parameters - ---------- - dataset - batch_size - drop_remainder - - Returns - ------- - - ''' - return dataset.batch(batch_size=batch_size, drop_remainder=drop_remainder) - - -def Concat(dataset_1, dataset_2): - - return dataset_1.concatenate(dataset_2) - - -def CsvDataset( - file_pattern, batch_size=1, column_names=None, column_defaults=None, label_name=None, select_columns=None, - field_delim=',', use_quote_delim=True, na_value='', header=True, num_epochs=None, shuffle=True, - shuffle_buffer_size=10000, shuffle_seed=None, prefetch_buffer_size=None, num_parallel_reads=None, sloppy=False, - num_rows_for_inference=100, compression_type=None, ignore_errors=False, numples_samples=None, num_shards=None, - shard_id=None, cache=None -): - """Reads CSV files into a dataset. - Reads CSV files into a dataset, where each element is a (features, labels) - tuple that corresponds to a batch of CSV rows. The features dictionary - maps feature column names to `Tensor`s containing the corresponding - feature data, and labels is a `Tensor` containing the batch's label data. 
- """ - return tf.data.experimental.make_csv_dataset( - file_pattern, batch_size, column_names=None, column_defaults=None, label_name=None, select_columns=None, - field_delim=',', use_quote_delim=True, na_value='', header=True, num_epochs=None, shuffle=True, - shuffle_buffer_size=10000, shuffle_seed=None, prefetch_buffer_size=None, num_parallel_reads=None, sloppy=False, - num_rows_for_inference=100, compression_type=None, ignore_errors=False - ) - - -def Filter(dataset, predicate): - ''' - Filters this dataset according to predicate. - Parameters - ---------- - dataset : - A dataset - predicate : - A function mapping a dataset element to a boolean. - Returns : - The Dataset containing the elements of this dataset for which predicate is True. - ------- - - ''' - return dataset.filter(predicate) - - -def Flat_map(dataset, map_func): - ''' - Maps map_func across this dataset and flattens the result. - Parameters - ---------- - dataset: - A dataset - map_func - A function mapping a dataset element to a dataset. - Returns - A Dataset. - ------- - - ''' - return dataset.flat_map(map_func) - - -def FromGenerator( - generator, output_types, output_shapes=None, args=None, column_names=None, column_types=None, schema=None, - num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None, shard_id=None, - python_multiprocessing=True -): - """Creates a `Dataset` whose elements are generated by `generator`. - - generator: - A callable object - """ - return tf.data.Dataset.from_generator(generator, output_types, output_shapes=output_shapes, args=args) - - -def FromSlices( - tensor, column_names=None, num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None, - shard_id=None -): - - return tf.data.Dataset.from_tensor_slices(tensor) - - -def Map( - dataset, map_func, num_parallel_calls=None, input_columns=None, output_columns=None, column_order=None, - num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None -): - """ Maps map_func across the elements of this dataset. - - Parameters - ---------- - dataset : DataFlow - input DataFlow - map_func : function - A function mapping a dataset element to another dataset element. - num_parallel_calls - - Returns - ------- - - """ - return dataset.map(map_func, num_parallel_calls=num_parallel_calls) - - -def Prefetch(dataset, buffer_size): - ''' - Creates a Dataset that prefetches elements from this dataset. - Parameters - ---------- - dataset: Dataflow - A dataset - buffer_size : - A tf.int64 scalar tf.Tensor, representing the maximum number of elements that will be buffered when prefetching. - Returns - A Dataset - ------- - - ''' - return dataset.prefetch(buffer_size=buffer_size) - - -def Repeat(dataset, count=None): - return dataset.repeat(count=count) - - -def Shuffle(dataset, buffer_size, seed=None, reshuffle_each_iteration=None): - return dataset.shuffle(buffer_size, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration) - - -def Skip(dataset, count): - ''' - Creates a Dataset that skips count elements from this dataset. - Parameters - ---------- - dataset: - A dataset - count: - A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be skipped to form the new dataset. - If count is greater than the size of this dataset, the new dataset will contain no elements. - If count is -1, skips the entire dataset. 
- - Returns - ------- - - ''' - return dataset.skip(count) - - -def Take(dataset, count): - ''' - Creates a Dataset with at most count elements from this dataset. - Parameters - ---------- - dataset: - A dataset - count: - A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be taken to form the new dataset. - If count is -1, or if count is greater than the size of this dataset, the new dataset will contain all elements of this dataset. - Returns - ------- - - ''' - return dataset.take(count) - - -def TextFlieDataset( - filenames, compression_type=None, buffer_size=None, num_parallel_reads=None, num_samples=None, shuffle=None, - num_shards=None, shard_id=None, cache=None -): - - return tf.data.TextLineDataset(filenames, compression_type, buffer_size, num_parallel_reads) - - -def TFRecordDataset( - filenames, compression_type=None, buffer_size=None, num_parallel_reads=None, schema=None, columns_list=None, - num_samples=None, shuffle=None, num_shards=None, shard_id=None, shard_equal_rows=False, cache=None -): - - return tf.data.TFRecordDataset(filenames, compression_type, buffer_size, num_parallel_reads) - - -def Zip(datasets): - ''' - Creates a Dataset by zipping together the given datasets. - Parameters - ---------- - datasets: - A tuple of datasets to be zipped together. - Returns - ------- - - ''' - return tf.data.Dataset.zip(datasets) - - -def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, prefetch=0, shuffle_buffer_size=1024): - - if shuffle: - dataset = Shuffle(dataset, buffer_size=shuffle_buffer_size, reshuffle_each_iteration=True) - - dataset = Batch(dataset, batch_size=batch_size, drop_remainder=drop_last) - dataset = Prefetch(dataset, buffer_size=prefetch) - - return dataset From febfed5ccca7a618ba14ccd33fdb7217ef830bb2 Mon Sep 17 00:00:00 2001 From: hanjr Date: Tue, 22 Jun 2021 10:30:11 +0800 Subject: [PATCH 16/36] add vision and docs --- docs/index.rst | 1 + docs/modules/vision.rst | 204 +++ tensorlayer/dataflow/__init__.py | 21 + tensorlayer/dataflow/dataflow_examples.py | 56 + tensorlayer/dataflow/mindspore_data.py | 175 +++ tensorlayer/dataflow/paddle_data.py | 120 ++ tensorlayer/dataflow/tensorflow_data.py | 221 +++ .../optimizers/mindspore_optimizers.py | 12 +- tensorlayer/vision/__init__.py | 3 + tensorlayer/vision/functional_cv2.py | 667 ++++++++ tensorlayer/vision/functional_pil.py | 554 +++++++ tensorlayer/vision/load_vision_backend.py | 16 + tensorlayer/vision/mindspore_vision.py | 625 ++++++++ tensorlayer/vision/paddle_vision.py | 608 +++++++ tensorlayer/vision/tensorflow_vision.py | 1393 +++++++++++++++++ tensorlayer/vision/transforms.py | 1256 +++++++++++++++ 16 files changed, 5926 insertions(+), 6 deletions(-) create mode 100644 docs/modules/vision.rst create mode 100644 tensorlayer/dataflow/__init__.py create mode 100644 tensorlayer/dataflow/dataflow_examples.py create mode 100644 tensorlayer/dataflow/mindspore_data.py create mode 100644 tensorlayer/dataflow/paddle_data.py create mode 100644 tensorlayer/dataflow/tensorflow_data.py create mode 100644 tensorlayer/vision/__init__.py create mode 100644 tensorlayer/vision/functional_cv2.py create mode 100644 tensorlayer/vision/functional_pil.py create mode 100644 tensorlayer/vision/load_vision_backend.py create mode 100644 tensorlayer/vision/mindspore_vision.py create mode 100644 tensorlayer/vision/paddle_vision.py create mode 100644 tensorlayer/vision/tensorflow_vision.py create mode 100644 tensorlayer/vision/transforms.py diff --git a/docs/index.rst 
b/docs/index.rst index b4b1fd2b6..6bab309da 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -63,6 +63,7 @@ method, this part of the documentation is for you. modules/layers modules/models modules/nlp + modules/vision modules/initializers modules/rein modules/utils diff --git a/docs/modules/vision.rst b/docs/modules/vision.rst new file mode 100644 index 000000000..70718bf64 --- /dev/null +++ b/docs/modules/vision.rst @@ -0,0 +1,204 @@ +API - Vision +============ + +.. automodule:: tensorlayer.vision.transforms + +.. ----------------------------------------------------------- +.. Vision Transforms List +.. ----------------------------------------------------------- + +Vision Transforms list +---------------------- + +.. autosummary:: + + ToTensor + Compose + + Crop + CentralCrop + RandomCrop + Pad + PadToBoundingbox + Resize + RandomResizedCrop + + RgbToGray + HsvToRgb + RgbToHsv + + AdjustBrightness + AdjustContrast + AdjustHue + AdjustSaturation + RandomBrightness + RandomContrast + RandomHue + RandomSaturation + ColorJitter + + FlipHorizontal + FlipVertical + RandomFlipHorizontal + RandomFlipVertical + + RandomRotation + RandomShift + RandomShear + RandomZoom + RandomAffine + + Transpose + HWC2CHW + CHW2HWC + + Normalize + StandardizePerImage + +.. ----------------------------------------------------------- +.. Vision Transforms +.. ----------------------------------------------------------- + +Vision Transforms +----------------- + +ToTensor +^^^^^^^^^^^^^^^^ +.. autoclass:: ToTensor + + +Compose +^^^^^^^^^^^^^^^^ +.. autoclass:: Compose + +Crop +^^^^^^^^^^^^^^^^ +.. autoclass:: Crop + +CentralCrop +^^^^^^^^^^^^^^^^ +.. autoclass:: CentralCrop + +RandomCrop +^^^^^^^^^^^^^^^^ +.. autoclass:: RandomCrop + +Pad +^^^^^^^^^^^^^^^^ +.. autoclass:: Pad + +PadToBoundingbox +^^^^^^^^^^^^^^^^ +.. autoclass:: PadToBoundingbox + +Resize +^^^^^^^^^^^^^^^^ +.. autoclass:: Resize + +RandomResizedCrop +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomResizedCrop + +RgbToGray +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RgbToGray + +HsvToRgb +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: HsvToRgb + +RgbToHsv +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RgbToHsv + +AdjustBrightness +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: AdjustBrightness + +AdjustContrast +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: AdjustContrast + +AdjustHue +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: AdjustHue + +AdjustSaturation +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: AdjustSaturation + +RandomBrightness +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomBrightness + +RandomContrast +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomContrast + +RandomHue +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomHue + +RandomSaturation +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomSaturation + +ColorJitter +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: ColorJitter + +FlipHorizontal +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: FlipHorizontal + +FlipVertical +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: FlipVertical + +RandomFlipHorizontal +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomFlipHorizontal + +RandomFlipVertical +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomFlipVertical + +RandomRotation +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomRotation + +RandomShift +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomShift + +RandomShear +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomShear + +RandomZoom +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomZoom + +RandomAffine +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomAffine + +Transpose +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: Transpose + +HWC2CHW +^^^^^^^^^^^^^^^^^^^^^ +.. 
autoclass:: HWC2CHW
+
+CHW2HWC
+^^^^^^^^^^^^^^^^^^^^^
+.. autoclass:: CHW2HWC
+
+Normalize
+^^^^^^^^^^^^^^^^^^^^^
+.. autoclass:: Normalize
+
+StandardizePerImage
+^^^^^^^^^^^^^^^^^^^^^
+.. autoclass:: StandardizePerImage
\ No newline at end of file
diff --git a/tensorlayer/dataflow/__init__.py b/tensorlayer/dataflow/__init__.py
new file mode 100644
index 000000000..6c625acc2
--- /dev/null
+++ b/tensorlayer/dataflow/__init__.py
@@ -0,0 +1,21 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, division, print_function
+
+from tensorlayer.backend.ops.load_backend import BACKEND
+
+
+if BACKEND == 'tensorflow':
+    from .tensorflow_data import *
+
+elif BACKEND == 'mindspore':
+    from .mindspore_data import *
+
+elif BACKEND == 'paddle':
+    from .paddle_data import *
+
+elif BACKEND == 'dragon':
+    pass
+
+else:
+    raise NotImplementedError("This backend is not supported")
diff --git a/tensorlayer/dataflow/dataflow_examples.py b/tensorlayer/dataflow/dataflow_examples.py
new file mode 100644
index 000000000..2bee24684
--- /dev/null
+++ b/tensorlayer/dataflow/dataflow_examples.py
@@ -0,0 +1,56 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import tensorlayer as tl
+from tensorlayer.dataflow import Dataset
+import numpy as np
+
+X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
+
+
+def generator_train():
+    inputs = X_train
+    targets = y_train
+    if len(inputs) != len(targets):
+        raise AssertionError("The length of inputs and targets should be equal")
+    for _input, _target in zip(inputs, targets):
+        yield (_input, np.array(_target))
+
+
+batch_size = 128
+shuffle_buffer_size = 128
+n_epoch = 10
+
+import tensorflow as tf
+
+
+def _map_fn_train(img, target):
+    # 1. Randomly crop a [height, width] section of the image.
+    img = tf.image.random_crop(img, [24, 24, 3])
+    # 2. Randomly flip the image horizontally.
+    img = tf.image.random_flip_left_right(img)
+    # 3. Randomly change brightness.
+    img = tf.image.random_brightness(img, max_delta=63)
+    # 4. Randomly change contrast.
+    img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
+    # 5. Subtract off the mean and divide by the variance of the pixels.
+    img = tf.image.per_image_standardization(img)
+    target = tf.reshape(target, ())
+    return img, target
+
+
+import multiprocessing
+train_ds = Dataset.from_generator(
+    generator=generator_train, output_types=(tl.float32, tl.int32)
+)  # , output_shapes=((24, 24, 3), (1)))
+
+train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
+
+train_ds = train_ds.repeat(n_epoch)
+train_ds = train_ds.shuffle(shuffle_buffer_size)
+train_ds = train_ds.prefetch(buffer_size=4096)
+train_ds = train_ds.batch(batch_size)
+
+for X_batch, y_batch in train_ds:
+    print(X_batch.shape, y_batch.shape)
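As written, dataflow_examples.py imports a Dataset class from tensorlayer.dataflow, but the backend modules added in this patch export free functions (FromGenerator, Map, Shuffle, Batch, ...) rather than a Dataset class. A sketch of the same pipeline written against the wrappers the package actually exports, assuming the TensorFlow backend is active (generator_train and _map_fn_train are the functions defined in the example above; names follow the tensorflow_data.py introduced below):

import multiprocessing
import tensorflow as tf
from tensorlayer.dataflow import FromGenerator, Map, Repeat, Shuffle, Prefetch, Batch

train_ds = FromGenerator(generator_train, output_types=(tf.float32, tf.int32))
train_ds = Map(train_ds, _map_fn_train)      # per-sample augmentation
train_ds = Repeat(train_ds, n_epoch)
train_ds = Shuffle(train_ds, shuffle_buffer_size)
train_ds = Prefetch(train_ds, buffer_size=4096)
train_ds = Batch(train_ds, batch_size)

for X_batch, y_batch in train_ds:
    print(X_batch.shape, y_batch.shape)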
diff --git a/tensorlayer/dataflow/mindspore_data.py b/tensorlayer/dataflow/mindspore_data.py
new file mode 100644
index 000000000..e50c15bfe
--- /dev/null
+++ b/tensorlayer/dataflow/mindspore_data.py
@@ -0,0 +1,175 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import mindspore.dataset as ds
+import mindspore as ms
+from enum import Enum
+__all__ = [
+    'Apply',
+    'Batch',
+    'Concat',
+    'Filter',
+    'Flat_map',
+    'FromGenerator',
+    'FromSlices',
+    'Map',
+    'Prefetch',
+    'Repeat',
+    'Shuffle',
+    'Skip',
+    'Take',
+    'Dataloader',
+]
+
+
+class shuffle_str(str, Enum):
+    GLOBAL: str = "global"
+    FILES: str = "file"
+
+
+def Apply(dataset, transformation_func):
+
+    return dataset.apply(transformation_func)
+
+
+def Batch(dataset, batch_size, drop_last=False):
+    '''
+    Combine consecutive rows of the dataset into batches.
+
+    Parameters
+    ----------
+    dataset:
+        A dataset.
+    batch_size:
+        The number of rows in each batch.
+    drop_last:
+        Whether to drop the last batch if it is smaller than batch_size.
+    Returns
+    -------
+
+    '''
+    return dataset.batch(batch_size=batch_size, drop_remainder=drop_last)
+
+
+def Concat(dataset_1, dataset_2):
+
+    return dataset_1.concat(dataset_2)
+
+
+def Filter(dataset, predicate):
+
+    return dataset.filter(predicate)
+
+
+def Flat_map(dataset, map_func):
+
+    return dataset.flat_map(map_func)
+
+
+def FromGenerator(generator, transform=None):
+
+    # transform is currently unused here; the GeneratorDataset yields raw (data, label) pairs.
+    return ds.GeneratorDataset(source=generator, column_names=["data", "label"])
+
+
+def FromSlices(
+    tensor, column_names=None, num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None,
+    shard_id=None
+):
+
+    return ds.NumpySlicesDataset(
+        data=tensor, column_names=column_names, num_samples=num_samples, num_parallel_workers=num_parallel_workers,
+        shuffle=shuffle, sampler=sampler, num_shards=num_shards, shard_id=shard_id
+    )
+
+
+def Map(dataset, map_func, input_columns=None):
+    """ Maps map_func across the elements of this dataset.
+
+    Parameters
+    ----------
+    dataset : DataFlow
+        input DataFlow
+    map_func : function
+        A function mapping a dataset element to another dataset element.
+    input_columns : list
+        The columns to pass to map_func.
+
+    Returns
+    -------
+
+    """
+    return dataset.map(operations=map_func, input_columns=input_columns)
+
+
+def Prefetch(dataset, buffer_size):
+
+    # Note: this delegates to a config setter rather than returning a new
+    # dataset, so its result cannot be chained like the TensorFlow version.
+    return dataset.config.set_prefetch_size(buffer_size)
+
+
+def Repeat(dataset, count=None):
+
+    return dataset.repeat(count)
+
+
+def Shuffle(dataset, buffer_size, seed=None, reshuffle_each_iteration=None):
+
+    # dataset.config.set_seed(seed)
+
+    return dataset.shuffle(buffer_size)
+
+
+def Skip(dataset, count):
+    '''
+    Creates a Dataset that skips count elements from this dataset.
+    Parameters
+    ----------
+    dataset:
+        A dataset.
+    count:
+        An integer, the number of elements of this dataset that should be skipped to form the new dataset.
+    Returns
+    -------
+
+    '''
+    return dataset.skip(count)
+
+
+def Take(dataset, count):
+    '''
+    Creates a Dataset with at most count elements from this dataset.
+    Parameters
+    ----------
+    dataset:
+        A dataset.
+    count:
+        An integer, the number of elements of this dataset that should be taken to form the new dataset.
+        If count is -1, or if count is greater than the size of this dataset, the new dataset will contain all elements of this dataset.
+    Returns
+    -------
+
+    '''
+    return dataset.take(count)
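A short usage sketch of these wrappers under the MindSpore backend; generator_train is assumed to yield (data, label) pairs as in dataflow_examples.py, matching the column names hard-coded in FromGenerator, and the batch sizes are illustrative:

from tensorlayer.dataflow import FromGenerator, Shuffle, Batch

train_ds = FromGenerator(generator_train)          # columns: ["data", "label"]
train_ds = Shuffle(train_ds, buffer_size=128)
train_ds = Batch(train_ds, batch_size=32, drop_last=True)

for data, label in train_ds.create_tuple_iterator():
    ...  # each column arrives as a MindSpore tensor

Because the MindSpore Prefetch sets a configuration value instead of returning a dataset, the Dataloader below intentionally applies only Shuffle and Batch.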
+
+
+def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, prefetch=2, shuffle_buffer_size=10000):
+
+    if shuffle:
+        dataset = Shuffle(dataset, buffer_size=shuffle_buffer_size)
+    dataset = Batch(dataset, batch_size=batch_size, drop_last=drop_last)
+    if prefetch:
+        dataset = Prefetch(dataset, prefetch)
+
+    return dataset
diff --git a/tensorlayer/dataflow/paddle_data.py b/tensorlayer/dataflow/paddle_data.py
new file mode 100644
index 000000000..fac24a33c
--- /dev/null
+++ b/tensorlayer/dataflow/paddle_data.py
@@ -0,0 +1,120 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import numpy as np
+import paddle
+from paddle.io import Dataset, BatchSampler, DataLoader, IterableDataset
+__all__ = [
+    'Concat',
+    'FromGenerator',
+    'FromSlices',
+    'Map',
+    # 'Shuffle',
+    # 'Batch',
+    'Dataloader',
+]
+
+
+def to_list(value):
+    if value is None:
+        return value
+    if isinstance(value, (list, tuple)):
+        return list(value)
+    return [value]
+
+
+class FromGenerator(Dataset):
+
+    def __init__(self, generator, transform=None):
+
+        if not callable(generator):
+            raise TypeError("'generator' must be callable")
+        self.generator = generator()
+        self.transform = transform
+        # The generator is fully materialized here, since paddle.io.Dataset
+        # requires random access by index.
+        self.datas = []
+        self.labels = []
+        for data, label in self.generator:
+            self.datas.append(data)
+            self.labels.append(label)
+
+    def __getitem__(self, idx):
+        x = self.datas[idx]
+        if self.transform:
+            x = self.transform(x)
+        y = self.labels[idx]
+
+        return x, y
+
+    def __len__(self):
+
+        return len(self.datas)
+
+
+class FromSlices(Dataset):
+
+    def __init__(self, datas, transform=None):
+        self.datas = datas[0]
+        self.labels = datas[1]
+        self.transform = transform
+
+        if len(self.datas) != len(self.labels):
+            raise ValueError('Data and labels must have the same length along the first dimension.')
+
+    def __getitem__(self, idx):
+
+        data = paddle.to_tensor(self.datas[idx], dtype='float32')
+        label = paddle.to_tensor(self.labels[idx], dtype='int64')
+        if self.transform is not None:
+            data = self.transform(data)
+        return data, label
+
+    def __len__(self):
+
+        return len(self.datas)
+
+
+class Concat(IterableDataset):
+
+    def __init__(self, datasets):
+        self.datasets = list(datasets)
+        assert len(self.datasets) > 0, "input datasets should not be empty"
+        for i, dataset in enumerate(self.datasets):
+            assert isinstance(dataset, IterableDataset), \
+                "Concat only supports paddle.io.IterableDataset"
+
+    def __iter__(self):
+        for dataset in self.datasets:
+            for sample in dataset:
+                yield sample
+
+
+class Map(Dataset):
+
+    def __init__(self, dataset, transform):
+        self.transform = transform
+        self.dataset = dataset
+
+    def __getitem__(self, idx):
+
+        x = self.dataset[idx][0]
+        if self.transform:
+            x = self.transform(x)
+        y = self.dataset[idx][1]
+
+        return x, y
+
+    def __len__(self):
+
+        return len(self.dataset)
+
+
+def Dataloader(dataset, batch_size=None, shuffle=False, drop_last=False, prefetch=0, shuffle_buffer_size=0):
+    # `prefetch` and `shuffle_buffer_size` are accepted for API compatibility
+    # with the other backends; paddle.io.DataLoader handles prefetching itself.
+    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last)
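+
+
+# A minimal usage sketch (illustrative only, and not part of the public API):
+# FromSlices pairs in-memory arrays, and Dataloader wraps paddle.io.DataLoader.
+def _paddle_dataflow_example():
+    datas = np.zeros((8, 3), dtype='float32')
+    labels = np.arange(8, dtype='int64')
+    dataset = FromSlices((datas, labels))
+    loader = Dataloader(dataset, batch_size=4, shuffle=True)
+    for x, y in loader:
+        print(x.shape, y.shape)  # [4, 3] [4]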
diff --git a/tensorlayer/dataflow/tensorflow_data.py b/tensorlayer/dataflow/tensorflow_data.py
new file mode 100644
index 000000000..85c4c593c
--- /dev/null
+++ b/tensorlayer/dataflow/tensorflow_data.py
@@ -0,0 +1,221 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import tensorflow as tf
+
+__all__ = [
+    'Apply',
+    'Batch',
+    'Concat',
+    'Filter',
+    'Flat_map',
+    'FromGenerator',
+    'FromSlices',
+    'Map',
+    'Prefetch',
+    'Repeat',
+    'Shuffle',
+    'Skip',
+    'Take',
+    'Zip',
+    'Dataloader',
+]
+
+
+def Apply(dataset, transformation_func):
+    """Applies a transformation function to this dataset.
+
+    `apply` enables chaining of custom `Dataset` transformations, which are
+    represented as functions that take one `Dataset` argument and return a
+    transformed `Dataset`.
+
+    Args:
+        transformation_func: A function that takes one `Dataset` argument and
+            returns a `Dataset`.
+
+    Returns:
+        Dataset: The `Dataset` returned by applying `transformation_func` to this
+            dataset.
+    """
+    return dataset.apply(transformation_func)
+
+
+def Batch(dataset, batch_size, drop_last=False):
+    '''
+    Combines consecutive elements of this dataset into batches.
+
+    Parameters
+    ----------
+    dataset :
+        A dataset.
+    batch_size :
+        The number of elements combined in a single batch.
+    drop_last :
+        Whether to drop the last batch when it contains fewer than batch_size elements.
+
+    Returns
+    -------
+        A batched dataset.
+    '''
+    return dataset.batch(batch_size=batch_size, drop_remainder=drop_last)
+
+
+def Concat(dataset_1, dataset_2):
+
+    return dataset_1.concatenate(dataset_2)
+
+
+def Filter(dataset, predicate):
+    '''
+    Filters this dataset according to predicate.
+
+    Parameters
+    ----------
+    dataset :
+        A dataset.
+    predicate :
+        A function mapping a dataset element to a boolean.
+
+    Returns
+    -------
+        The Dataset containing the elements of this dataset for which predicate is True.
+    '''
+    return dataset.filter(predicate)
+
+
+def Flat_map(dataset, map_func):
+    '''
+    Maps map_func across this dataset and flattens the result.
+
+    Parameters
+    ----------
+    dataset :
+        A dataset.
+    map_func :
+        A function mapping a dataset element to a dataset.
+
+    Returns
+    -------
+        A Dataset.
+    '''
+    return dataset.flat_map(map_func)
+
+
+def FromGenerator(generator, output_types=(tf.float32, tf.int32), column_names=None):
+    """Creates a `Dataset` whose elements are generated by `generator`.
+
+    generator :
+        A callable object.
+    column_names :
+        Unused by the TensorFlow backend; kept for API compatibility with the other backends.
+    """
+    return tf.data.Dataset.from_generator(generator, output_types=output_types, output_shapes=None, args=None)
+
+
+def FromSlices(
+    tensor, column_names=None, num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None,
+    shard_id=None
+):
+    # Only `tensor` is used by the TensorFlow backend; the remaining arguments
+    # are kept for API compatibility with the other backends.
+    return tf.data.Dataset.from_tensor_slices(tensor)
+
+
+def Map(
+    dataset, map_func, input_columns=None, output_columns=None, column_order=None,
+    num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None
+):
+    """ Maps map_func across the elements of this dataset.
+
+    Parameters
+    ----------
+    dataset : DataFlow
+        input DataFlow
+    map_func : function
+        A function mapping a dataset element to another dataset element.
+    num_parallel_workers : int
+        The number of elements to process in parallel.
+
+    Returns
+    -------
+        A mapped dataset.
+    """
+    return dataset.map(map_func, num_parallel_calls=num_parallel_workers)
+
+
+def Prefetch(dataset, buffer_size):
+    '''
+    Creates a Dataset that prefetches elements from this dataset.
+
+    Parameters
+    ----------
+    dataset : Dataflow
+        A dataset.
+    buffer_size :
+        A tf.int64 scalar tf.Tensor, representing the maximum number of elements that will be buffered when prefetching.
+
+    Returns
+    -------
+        A Dataset.
+    '''
+    return dataset.prefetch(buffer_size=buffer_size)
+
+
+def Repeat(dataset, count=None):
+    return dataset.repeat(count=count)
+
+
+def Shuffle(dataset, buffer_size, seed=None, reshuffle_each_iteration=True):
+    return dataset.shuffle(buffer_size, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration)
+
+
+def Skip(dataset, count):
+    '''
+    Creates a Dataset that skips count elements from this dataset.
+ Parameters + ---------- + dataset: + A dataset + count: + A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be skipped to form the new dataset. + If count is greater than the size of this dataset, the new dataset will contain no elements. + If count is -1, skips the entire dataset. + + Returns + ------- + + ''' + return dataset.skip(count) + + +def Take(dataset, count): + ''' + Creates a Dataset with at most count elements from this dataset. + Parameters + ---------- + dataset: + A dataset + count: + A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be taken to form the new dataset. + If count is -1, or if count is greater than the size of this dataset, the new dataset will contain all elements of this dataset. + Returns + ------- + + ''' + return dataset.take(count) + +def Zip(datasets): + ''' + Creates a Dataset by zipping together the given datasets. + Parameters + ---------- + datasets: + A tuple of datasets to be zipped together. + Returns + ------- + + ''' + return tf.data.Dataset.zip(datasets) + + + +def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, prefetch=2, shuffle_buffer_size=10000): + + + if shuffle: + dataset = Shuffle(dataset, buffer_size=shuffle_buffer_size, reshuffle_each_iteration=True) + + dataset = Batch(dataset, batch_size=batch_size, drop_last=drop_last) + dataset = Prefetch(dataset, buffer_size=prefetch) + + return dataset diff --git a/tensorlayer/optimizers/mindspore_optimizers.py b/tensorlayer/optimizers/mindspore_optimizers.py index dd70e5fe2..6472d4e7d 100644 --- a/tensorlayer/optimizers/mindspore_optimizers.py +++ b/tensorlayer/optimizers/mindspore_optimizers.py @@ -23,7 +23,7 @@ class Adagrad(Cell): def __init__(self): pass - def app_gradients(self): + def apply_gradients(self): raise Exception('Adagrad optimizer function not implemented') @@ -55,7 +55,7 @@ class Adamax(Cell): def __init__(self): pass - def app_gradients(self): + def apply_gradients(self): raise Exception('Adamax optimizer function not implemented') @@ -64,7 +64,7 @@ class Ftrl(Cell): def __init__(self): pass - def app_gradients(self): + def apply_gradients(self): raise Exception('Ftrl optimizer function not implemented') @@ -73,7 +73,7 @@ class Nadam(Cell): def __init__(self): pass - def app_gradients(self): + def apply_gradients(self): raise Exception('Nadam optimizer function not implemented') @@ -82,7 +82,7 @@ class RMSprop(Cell): def __init__(self): pass - def app_gradients(self): + def apply_gradients(self): raise Exception('RMSprop optimizer function not implemented') @@ -91,7 +91,7 @@ class RMSprop(Cell): def __init__(self): pass - def app_gradients(self): + def apply_gradients(self): raise Exception('RMSprop optimizer function not implemented') diff --git a/tensorlayer/vision/__init__.py b/tensorlayer/vision/__init__.py new file mode 100644 index 000000000..9f0fc8e48 --- /dev/null +++ b/tensorlayer/vision/__init__.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +from . 
import transforms
diff --git a/tensorlayer/vision/functional_cv2.py b/tensorlayer/vision/functional_cv2.py
new file mode 100644
index 000000000..de8e18e42
--- /dev/null
+++ b/tensorlayer/vision/functional_cv2.py
@@ -0,0 +1,667 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import numpy as np
+from numpy import sin, cos, tan
+import math
+import numbers
+import importlib
+
+
+def try_import(module_name):
+    """Try importing a module, with an informative error message on failure."""
+    install_name = module_name
+
+    if module_name.find('.') > -1:
+        install_name = module_name.split('.')[0]
+
+    if module_name == 'cv2':
+        install_name = 'opencv-python'
+
+    try:
+        mod = importlib.import_module(module_name)
+        return mod
+    except ImportError:
+        err_msg = (
+            "Failed importing {}. This likely means that some tensorlayer modules "
+            "require additional dependencies that have to be "
+            "manually installed (usually with `pip install {}`). "
+        ).format(module_name, install_name)
+        raise ImportError(err_msg)
+
+
+def crop(image, offset_height, offset_width, target_height, target_width):
+    image_height, image_width = image.shape[0:2]
+    if offset_width < 0:
+        raise ValueError('offset_width must be >= 0.')
+    if offset_height < 0:
+        raise ValueError('offset_height must be >= 0.')
+    if target_height < 0:
+        raise ValueError('target_height must be >= 0.')
+    if target_width < 0:
+        raise ValueError('target_width must be >= 0.')
+    if offset_width + target_width > image_width:
+        raise ValueError('offset_width + target_width must be <= image width.')
+    if offset_height + target_height > image_height:
+        raise ValueError('offset_height + target_height must be <= image height.')
+
+    return image[offset_height:offset_height + target_height, offset_width:offset_width + target_width]
+
+
+def center_crop(image, size, central_fraction):
+
+    image_height, image_width = image.shape[0:2]
+    if size is not None:
+        if not isinstance(size, (int, list, tuple)) or (isinstance(size, (list, tuple)) and len(size) != 2):
+            raise TypeError(
+                "Size should be a single integer or a list/tuple (h, w) of length 2. "
+                "But got {}.".format(size)
+            )
+
+        if isinstance(size, int):
+            target_height = size
+            target_width = size
+        else:
+            target_height = size[0]
+            target_width = size[1]
+
+    elif central_fraction is not None:
+        if central_fraction <= 0.0 or central_fraction > 1.0:
+            raise ValueError('central_fraction must be within (0, 1]')
+
+        target_height = int(central_fraction * image_height)
+        target_width = int(central_fraction * image_width)
+
+    crop_top = int(round((image_height - target_height) / 2.))
+    crop_left = int(round((image_width - target_width) / 2.))
+
+    return crop(image, crop_top, crop_left, target_height, target_width)
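+
+
+# A minimal usage sketch (illustrative only, and not part of the public API):
+# crop() takes the top-left corner as (offset_height, offset_width), while
+# center_crop() accepts either a target size or a central fraction.
+def _crop_example():
+    image = np.zeros((8, 10, 3), dtype=np.uint8)
+    print(crop(image, 2, 3, 4, 5).shape)  # (4, 5, 3)
+    print(center_crop(image, size=4, central_fraction=None).shape)  # (4, 4, 3)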
"But got {}".format(padding)) + + if mode not in ['constant', 'edge', 'reflect', 'symmetric']: + raise ValueError("Padding mode should be 'constant', 'edge', 'reflect', or 'symmetric'.") + cv2 = try_import('cv2') + _cv2_pad_from_str = { + 'constant': cv2.BORDER_CONSTANT, + 'edge': cv2.BORDER_REPLICATE, + 'reflect': cv2.BORDER_REFLECT_101, + 'symmetric': cv2.BORDER_REFLECT + } + + if len(image.shape) == 3 and image.shape[2] == 1: + return cv2.copyMakeBorder( + image, top=top, bottom=bottom, left=left, right=right, borderType=_cv2_pad_from_str[mode], + value=padding_value + )[:, :, np.newaxis] + else: + return cv2.copyMakeBorder( + image, top=top, bottom=bottom, left=left, right=right, borderType=_cv2_pad_from_str[mode], + value=padding_value + ) + + +def resize(image, size, method): + + if not (isinstance(size, int) or (isinstance(size, (list, tuple)) and len(size) == 2)): + raise TypeError('Size should be a single number or a list/tuple (h, w) of length 2.' 'Got {}.'.format(size)) + if method not in ('nearest', 'bilinear', 'area', 'bicubic' 'lanczos'): + raise ValueError( + "Unknown resize method! resize method must be in " + "(\'nearest\',\'bilinear\',\'bicubic\',\'area\',\'lanczos\')" + ) + cv2 = try_import('cv2') + _cv2_interp_from_str = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'area': cv2.INTER_AREA, + 'bicubic': cv2.INTER_CUBIC, + 'lanczos': cv2.INTER_LANCZOS4 + } + + h, w = image.shape[:2] + + if isinstance(size, int): + if (w <= h and w == size) or (h <= w and h == size): + return image + if w < h: + target_w = size + target_h = int(size * h / w) + else: + target_h = size + target_w = int(size * w / h) + size = (target_h, target_w) + output = cv2.resize(image, dsize=(size[1], size[0]), interpolation=_cv2_interp_from_str[method]) + if len(image.shape) == 3 and image.shape[2] == 1: + return output[:, :, np.newaxis] + else: + return output + + +def transpose(image, order): + + if not (isinstance(order, (list, tuple)) and len(order) == 3): + raise TypeError("Order must be a list/tuple of length 3." 
"But got {}.".format(order)) + + image_shape = image.shape + if len(image_shape) == 2: + image = image[..., np.newaxis] + + return image.transpose(order) + + +def hwc_to_chw(image): + + image_shape = image.shape + if len(image_shape) == 2: + image = image[..., np.newaxis] + + return image.transpose((2, 0, 1)) + + +def chw_to_hwc(image): + + image_shape = image.shape + if len(image_shape) == 2: + image = image[..., np.newaxis] + + return image.transpose((1, 2, 0)) + + +def rgb_to_hsv(image): + + cv2 = try_import('cv2') + image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) + return image + + +def hsv_to_rgb(image): + + cv2 = try_import('cv2') + image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB) + return image + + +def rgb_to_gray(image, num_output_channels): + + cv2 = try_import('cv2') + + if num_output_channels == 1: + image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis] + elif num_output_channels == 3: + image = np.broadcast_to(cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis], image.shape) + else: + raise ValueError('num_output_channels should be either 1 or 3') + + return image + + +def adjust_brightness(image, brightness_factor): + if brightness_factor < 0: + raise ValueError('brightness_factor ({}) is not non-negative.'.format(brightness_factor)) + cv2 = try_import('cv2') + + table = np.array([i * brightness_factor for i in range(0, 256)]).clip(0, 255).astype('uint8') + + if len(image.shape) == 3 and image.shape[2] == 1: + return cv2.LUT(image, table)[:, :, np.newaxis] + else: + return cv2.LUT(image, table) + + +def adjust_contrast(image, contrast_factor): + """Adjusts contrast of an image. + + Args: + img (np.array): Image to be adjusted. + contrast_factor (float): How much to adjust the contrast. Can be any + non negative number. 0 gives a solid gray image, 1 gives the + original image while 2 increases the contrast by a factor of 2. + + Returns: + np.array: Contrast adjusted image. + + """ + if contrast_factor < 0: + raise ValueError('contrast_factor ({}) is not non-negative.'.format(contrast_factor)) + cv2 = try_import('cv2') + + table = np.array([(i - 127) * contrast_factor + 127 for i in range(0, 256)]).clip(0, 255).astype('uint8') + if len(image.shape) == 3 and image.shape[2] == 1: + return cv2.LUT(image, table)[:, :, np.newaxis] + else: + return cv2.LUT(image, table) + + +def adjust_hue(image, hue_factor): + """Adjusts hue of an image. + + The image hue is adjusted by converting the image to HSV and + cyclically shifting the intensities in the hue channel (H). + The image is then converted back to original image mode. + + `hue_factor` is the amount of shift in H channel and must be in the + interval `[-0.5, 0.5]`. + + Args: + image (PIL.Image): PIL Image to be adjusted. + hue_factor (float): How much to shift the hue channel. Should be in + [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in + HSV space in positive and negative direction respectively. + 0 means no shift. Therefore, both -0.5 and 0.5 will give an image + with complementary colors while 0 gives the original image. + + Returns: + PIL.Image: Hue adjusted image. 
+
+
+def adjust_hue(image, hue_factor):
+    """Adjusts hue of an image.
+
+    The image hue is adjusted by converting the image to HSV and
+    cyclically shifting the intensities in the hue channel (H).
+    The image is then converted back to original image mode.
+
+    `hue_factor` is the amount of shift in H channel and must be in the
+    interval `[-0.5, 0.5]`.
+
+    Args:
+        image (np.array): Image to be adjusted.
+        hue_factor (float): How much to shift the hue channel. Should be in
+            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
+            HSV space in positive and negative direction respectively.
+            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
+            with complementary colors while 0 gives the original image.
+
+    Returns:
+        np.array: Hue adjusted image.
+
+    """
+    if not (-0.5 <= hue_factor <= 0.5):
+        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
+    cv2 = try_import('cv2')
+
+    dtype = image.dtype
+    image = image.astype(np.uint8)
+    hsv_img = cv2.cvtColor(image, cv2.COLOR_RGB2HSV_FULL)
+    h, s, v = cv2.split(hsv_img)
+
+    h = h.astype(np.uint8)
+    # uint8 addition takes care of rotation across boundaries
+    with np.errstate(over="ignore"):
+        h += np.uint8(hue_factor * 255)
+    hsv_img = cv2.merge([h, s, v])
+    return cv2.cvtColor(hsv_img, cv2.COLOR_HSV2RGB_FULL).astype(dtype)
+
+
+def adjust_saturation(image, saturation_factor):
+    """Adjusts color saturation of an image.
+
+    Args:
+        image (np.array): Image to be adjusted.
+        saturation_factor (float): How much to adjust the saturation. 0 will
+            give a black and white image, 1 will give the original image while
+            2 will enhance the saturation by a factor of 2.
+
+    Returns:
+        np.array: Saturation adjusted image.
+
+    """
+    if saturation_factor < 0:
+        raise ValueError('saturation_factor ({}) is not non-negative.'.format(saturation_factor))
+    cv2 = try_import('cv2')
+
+    dtype = image.dtype
+    image = image.astype(np.float32)
+    # blend the image with its grayscale version
+    alpha = saturation_factor
+    gray_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
+    gray_img = gray_img[..., np.newaxis]
+    img = image * alpha + gray_img * (1 - alpha)
+    return img.clip(0, 255).astype(dtype)
+
+
+def hflip(image):
+    """Horizontally flips the given image.
+
+    Args:
+        image (np.array): Image to be flipped.
+
+    Returns:
+        np.array: Horizontally flipped image.
+
+    """
+    cv2 = try_import('cv2')
+
+    # keep the channel axis for single-channel images (cv2.flip drops it)
+    if len(image.shape) == 3 and image.shape[2] == 1:
+        return cv2.flip(image, 1)[:, :, np.newaxis]
+    else:
+        return cv2.flip(image, 1)
+
+
+def vflip(image):
+    """Vertically flips the given np.array.
+
+    Args:
+        image (np.array): Image to be flipped.
+
+    Returns:
+        np.array: Vertically flipped image.
+
+    """
+    cv2 = try_import('cv2')
+
+    if len(image.shape) == 3 and image.shape[2] == 1:
+        return cv2.flip(image, 0)[:, :, np.newaxis]
+    else:
+        return cv2.flip(image, 0)
+
+
+def padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value):
+    '''
+
+    Parameters
+    ----------
+    image:
+        A np.array image to be padded size of (target_width, target_height)
+    offset_height:
+        Number of rows of padding_values to add on top.
+    offset_width:
+        Number of columns of padding_values to add on the left.
+    target_height:
+        Height of output image.
+    target_width:
+        Width of output image.
+    padding_value:
+        Value to pad.
+
+    Returns:
+        np.array image: padded image
+    -------
+
+    '''
+    if offset_height < 0:
+        raise ValueError('offset_height must be >= 0')
+    if offset_width < 0:
+        raise ValueError('offset_width must be >= 0')
+
+    height, width = image.shape[:2]
+    after_padding_width = target_width - offset_width - width
+    after_padding_height = target_height - offset_height - height
+    if after_padding_height < 0:
+        raise ValueError('image height must be <= target - offset')
+    if after_padding_width < 0:
+        raise ValueError('image width must be <= target - offset')
+
+    return pad(
+        image, padding=(offset_width, offset_height, after_padding_width, after_padding_height),
+        padding_value=padding_value, mode='constant'
+    )
+
+
+def rotate(img, angle, interpolation, expand, center, fill):
+    """Rotates the image by angle.
+
+    Args:
+        img (np.array): Image to be rotated.
+        angle (float or int): Angle in degrees, counter-clockwise.
+        interpolation (int|str, optional): Interpolation method.
If omitted, or if the + image has only one channel, it is set to cv2.INTER_NEAREST. + when use cv2 backend, support method are as following: + - "nearest": cv2.INTER_NEAREST, + - "bilinear": cv2.INTER_LINEAR, + - "bicubic": cv2.INTER_CUBIC + expand (bool, optional): Optional expansion flag. + If true, expands the output image to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the input image. + Note that the expand flag assumes rotation around the center and no translation. + center (2-tuple, optional): Optional center of rotation. + Origin is the upper left corner. + Default is the center of the image. + fill (3-tuple or int): RGB pixel fill value for area outside the rotated image. + If int, it is used for all channels respectively. + + Returns: + np.array: Rotated image. + + """ + cv2 = try_import('cv2') + _cv2_interp_from_str = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'area': cv2.INTER_AREA, + 'bicubic': cv2.INTER_CUBIC, + 'lanczos': cv2.INTER_LANCZOS4 + } + h, w, c = img.shape + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' + 'But got {}'.format(fill) + ) + + if center is None: + center = (w / 2.0, h / 2.0) + M = cv2.getRotationMatrix2D(center, angle, 1) + + if expand: + + def transform(x, y, matrix): + (a, b, c, d, e, f) = matrix + return a * x + b * y + c, d * x + e * y + f + + # calculate output size + xx = [] + yy = [] + + angle = -math.radians(angle) + expand_matrix = [ + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, + ] + + post_trans = (0, 0) + expand_matrix[2], expand_matrix[5] = transform( + -center[0] - post_trans[0], -center[1] - post_trans[1], expand_matrix + ) + expand_matrix[2] += center[0] + expand_matrix[5] += center[1] + + for x, y in ((0, 0), (w, 0), (w, h), (0, h)): + x, y = transform(x, y, expand_matrix) + xx.append(x) + yy.append(y) + nw = math.ceil(max(xx)) - math.floor(min(xx)) + nh = math.ceil(max(yy)) - math.floor(min(yy)) + + M[0, 2] += (nw - w) * 0.5 + M[1, 2] += (nh - h) * 0.5 + + w, h = int(nw), int(nh) + + if len(img.shape) == 3 and img.shape[2] == 1: + return cv2.warpAffine(img, M, (w, h), flags=_cv2_interp_from_str[interpolation], borderValue=fill)[:, :, + np.newaxis] + else: + return cv2.warpAffine(img, M, (w, h), flags=_cv2_interp_from_str[interpolation], borderValue=fill) + + +def get_affine_matrix(center, angle, translate, scale, shear): + + rot = math.radians(angle) + sx, sy = [math.radians(s) for s in shear] + + cx, cy = center + tx, ty = translate + + # RSS without scaling + a = math.cos(rot - sy) / math.cos(sy) + b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot) + c = math.sin(rot - sy) / math.cos(sy) + d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot) + + # Inverted rotation matrix with scale and shear + # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1 + matrix = [d, -b, 0.0, -c, a, 0.0] + matrix = [x / scale for x in matrix] + + # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1 + matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty) + matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty) + + # Apply center translation: C * RSS^-1 * C^-1 * T^-1 + matrix[2] += cx + matrix[5] += cy 
+ + return matrix + + +def random_shear(image, degrees, interpolation, fill): + + cv2 = try_import('cv2') + _cv2_interp_from_str = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'area': cv2.INTER_AREA, + 'bicubic': cv2.INTER_CUBIC, + 'lanczos': cv2.INTER_LANCZOS4 + } + + h, w, c = image.shape + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' + 'But got {}'.format(fill) + ) + + center = (w / 2.0, h / 2.0) + shear = [-np.random.uniform(degrees[0], degrees[1]), -np.random.uniform(degrees[2], degrees[3])] + + matrix = get_affine_matrix(center=center, angle=0, translate=(0, 0), scale=1.0, shear=shear) + matrix = np.asarray(matrix).reshape((2, 3)) + + if len(image.shape) == 3 and image.shape[2] == 1: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], + borderValue=fill)[:, :, np.newaxis] + else: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], borderValue=fill) + + +def random_shift(image, shift, interpolation, fill): + + cv2 = try_import('cv2') + _cv2_interp_from_str = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'area': cv2.INTER_AREA, + 'bicubic': cv2.INTER_CUBIC, + 'lanczos': cv2.INTER_LANCZOS4 + } + + h, w, c = image.shape + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' + 'But got {}'.format(fill) + ) + hrg = shift[0] + wrg = shift[1] + tx = -np.random.uniform(-hrg, hrg) * w + ty = -np.random.uniform(-wrg, wrg) * h + center = (w / 2.0, h / 2.0) + + matrix = get_affine_matrix(center=center, angle=0, translate=(tx, ty), scale=1.0, shear=(0, 0)) + matrix = np.asarray(matrix).reshape((2, 3)) + + if len(image.shape) == 3 and image.shape[2] == 1: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], + borderValue=fill)[:, :, np.newaxis] + else: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], borderValue=fill) + + +def random_zoom(image, zoom, interpolation, fill): + cv2 = try_import('cv2') + _cv2_interp_from_str = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'area': cv2.INTER_AREA, + 'bicubic': cv2.INTER_CUBIC, + 'lanczos': cv2.INTER_LANCZOS4 + } + + h, w, c = image.shape + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' 
+ 'But got {}'.format(fill) + ) + + scale = 1 / np.random.uniform(zoom[0], zoom[1]) + center = (w / 2.0, h / 2.0) + + matrix = get_affine_matrix(center=center, angle=0, translate=(0, 0), scale=scale, shear=(0, 0)) + matrix = np.asarray(matrix).reshape((2, 3)) + + if len(image.shape) == 3 and image.shape[2] == 1: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], + borderValue=fill)[:, :, np.newaxis] + else: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], borderValue=fill) + + +def random_affine(image, degrees, shift, zoom, shear, interpolation, fill): + cv2 = try_import('cv2') + _cv2_interp_from_str = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'area': cv2.INTER_AREA, + 'bicubic': cv2.INTER_CUBIC, + 'lanczos': cv2.INTER_LANCZOS4 + } + h, w, c = image.shape + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' + 'But got {}'.format(fill) + ) + center = (w / 2.0, h / 2.0) + + angle = -float(np.random.uniform(degrees[0], degrees[1])) + + if shift is not None: + max_dx = float(shift[0] * h) + max_dy = float(shift[1] * w) + tx = -int(round(np.random.uniform(-max_dx, max_dx))) + ty = -int(round(np.random.uniform(-max_dy, max_dy))) + shift = [tx, ty] + else: + shift = [0, 0] + + if zoom is not None: + scale = 1 / np.random.uniform(zoom[0], zoom[1]) + else: + scale = 1.0 + + shear_x = shear_y = 0.0 + if shear is not None: + shear_x = float(np.random.uniform(shear[0], shear[1])) + if len(shear) == 4: + shear_y = float(np.random.uniform(shear[2], shear[3])) + shear = (-shear_x, -shear_y) + + matrix = get_affine_matrix(center=center, angle=angle, translate=shift, scale=scale, shear=shear) + matrix = np.asarray(matrix).reshape((2, 3)) + + if len(image.shape) == 3 and image.shape[2] == 1: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], + borderValue=fill)[:, :, np.newaxis] + else: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], borderValue=fill) diff --git a/tensorlayer/vision/functional_pil.py b/tensorlayer/vision/functional_pil.py new file mode 100644 index 000000000..124b870d9 --- /dev/null +++ b/tensorlayer/vision/functional_pil.py @@ -0,0 +1,554 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import PIL +from PIL import Image, ImageOps, ImageEnhance +import numpy as np +import colorsys +import random +import math +from numpy import sin, cos, tan +import numbers + +_pil_interp_from_str = { + 'nearest': Image.NEAREST, + 'bilinear': Image.BILINEAR, + 'bicubic': Image.BICUBIC, + 'box': Image.BOX, + 'lanczos': Image.LANCZOS, + 'hamming': Image.HAMMING +} + + +def crop(image, offset_height, offset_width, target_height, target_width): + image_width, image_height = image.size + if offset_width < 0: + raise ValueError('offset_width must be >0.') + if offset_height < 0: + raise ValueError('offset_height must be >0.') + if target_height < 0: + raise ValueError('target_height must be >0.') + if target_width < 0: + raise ValueError('target_width must be >0.') + if offset_width + target_width > image_width: + raise ValueError('offset_width + target_width must be <= image width.') + if offset_height + target_height > image_height: + raise ValueError('offset_height + target_height must be <= image height.') + + return image.crop((offset_width, 
offset_height, offset_width + target_width, offset_height + target_height)) + + +def center_crop(image, size, central_fraction): + + image_width, image_height = image.size + if size is not None: + if not isinstance(size, (int, list, tuple)) or (isinstance(size, (list, tuple)) and len(size) != 2): + raise TypeError( + "Size should be a single integer or a list/tuple (h, w) of length 2.But" + "got {}.".format(size) + ) + + if isinstance(size, int): + target_height = size + target_width = size + else: + target_height = size[0] + target_width = size[1] + + elif central_fraction is not None: + if central_fraction <= 0.0 or central_fraction > 1.0: + raise ValueError('central_fraction must be within (0, 1]') + + target_height = int(central_fraction * image_height) + target_width = int(central_fraction * image_width) + + crop_top = int(round((image_height - target_height) / 2.)) + crop_left = int(round((image_width - target_width) / 2.)) + + return crop(image, crop_top, crop_left, target_height, target_width) + + +def pad(image, padding, padding_value, mode): + + if isinstance(padding, int): + top = bottom = left = right = padding + + elif isinstance(padding, (tuple, list)): + if len(padding) == 2: + left = right = padding[0] + top = bottom = padding[1] + elif len(padding) == 4: + left = padding[0] + top = padding[1] + right = padding[2] + bottom = padding[3] + else: + raise TypeError("The size of the padding list or tuple should be 2 or 4." "But got {}".format(padding)) + else: + raise TypeError("Padding can be any of: a number, a tuple or list of size 2 or 4." "But got {}".format(padding)) + + if mode not in ['constant', 'edge', 'reflect', 'symmetric']: + raise TypeError("Padding mode should be 'constant', 'edge', 'reflect', or 'symmetric'.") + + if mode == 'constant': + if image.mode == 'P': + palette = image.getpalette() + image = ImageOps.expand(image, border=padding, fill=padding_value) + image.putpalette(palette) + return image + return ImageOps.expand(image, border=padding, fill=padding_value) + + if image.mode == 'P': + palette = image.getpalette() + image = np.asarray(image) + image = np.pad(image, ((top, bottom), (left, right)), mode) + image = Image.fromarray(image) + image.putpalette(palette) + return image + + image = np.asarray(image) + # RGB image + if len(image.shape) == 3: + image = np.pad(image, ((top, bottom), (left, right), (0, 0)), mode) + # Grayscale image + if len(image.shape) == 2: + image = np.pad(image, ((top, bottom), (left, right)), mode) + + return Image.fromarray(image) + + +def resize(image, size, method): + + if not (isinstance(size, int) or (isinstance(size, (list, tuple)) and len(size) == 2)): + raise TypeError('Size should be a single number or a list/tuple (h, w) of length 2.' 'Got {}.'.format(size)) + + if method not in ('nearest', 'bilinear', 'bicubic', 'box', 'lanczos', 'hamming'): + raise ValueError( + "Unknown resize method! 
resize method must be in " + "(\'nearest\',\'bilinear\',\'bicubic\',\'box\',\'lanczos\',\'hamming\')" + ) + if isinstance(size, int): + w, h = image.size + if (w <= h and w == size) or (h <= w and h == size): + return image + if w < h: + ow = size + oh = int(size * h / w) + return image.resize((ow, oh), _pil_interp_from_str[method]) + else: + oh = size + ow = int(size * w / h) + return image.resize((ow, oh), _pil_interp_from_str[method]) + else: + return image.resize(size[::-1], _pil_interp_from_str[method]) + + +def transpose(image, order): + + image = np.asarray(image) + if not (isinstance(order, (list, tuple)) and len(order) == 3): + raise TypeError("Order must be a list/tuple of length 3." "But got {}.".format(order)) + + image_shape = image.shape + if len(image_shape) == 2: + image = image[..., np.newaxis] + + image = image.transpose(order) + image = Image.fromarray(image) + return image + + +def hwc_to_chw(image): + + image_shape = image.shape + if len(image_shape) == 2: + image = image[..., np.newaxis] + + image = image.transpose((2, 0, 1)) + image = Image.fromarray(image) + return image + + +def chw_to_hwc(image): + + image_shape = image.shape + if len(image_shape) == 2: + image = image[..., np.newaxis] + + image = image.transpose((1, 2, 0)) + image = Image.fromarray(image) + return image + + +def rgb_to_hsv(image): + + return image.convert('HSV') + + +def hsv_to_rgb(image): + + return image.convert('RGB') + + +def rgb_to_gray(image, num_output_channels): + + if num_output_channels == 1: + img = image.convert('L') + elif num_output_channels == 3: + img = image.convert('L') + np_img = np.array(img, dtype=np.uint8) + np_img = np.dstack([np_img, np_img, np_img]) + img = Image.fromarray(np_img, 'RGB') + else: + raise ValueError('num_output_channels should be either 1 or 3') + + return img + + +def adjust_brightness(image, brightness_factor): + """Adjusts brightness of an Image. + + Args: + image (PIL.Image): PIL Image to be adjusted. + brightness_factor (float): How much to adjust the brightness. Can be + any non negative number. 0 gives a black image, 1 gives the + original image while 2 increases the brightness by a factor of 2. + + Returns: + PIL.Image: Brightness adjusted image. + + """ + if brightness_factor < 0: + raise ValueError('brightness_factor ({}) is not non-negative.'.format(brightness_factor)) + + enhancer = ImageEnhance.Brightness(image) + image = enhancer.enhance(brightness_factor) + return image + + +def adjust_contrast(image, contrast_factor): + """Adjusts contrast of an Image. + + Args: + image (PIL.Image): PIL Image to be adjusted. + contrast_factor (float): How much to adjust the contrast. Can be any + non negative number. 0 gives a solid gray image, 1 gives the + original image while 2 increases the contrast by a factor of 2. + + Returns: + PIL.Image: Contrast adjusted image. + + """ + if contrast_factor < 0: + raise ValueError('contrast_factor ({}) is not non-negative.'.format(contrast_factor)) + + enhancer = ImageEnhance.Contrast(image) + image = enhancer.enhance(contrast_factor) + return image + + +def adjust_hue(image, hue_factor): + """Adjusts hue of an image. + + The image hue is adjusted by converting the image to HSV and + cyclically shifting the intensities in the hue channel (H). + The image is then converted back to original image mode. + + `hue_factor` is the amount of shift in H channel and must be in the + interval `[-0.5, 0.5]`. + + Args: + image (PIL.Image): PIL Image to be adjusted. + hue_factor (float): How much to shift the hue channel. 
Should be in + [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in + HSV space in positive and negative direction respectively. + 0 means no shift. Therefore, both -0.5 and 0.5 will give an image + with complementary colors while 0 gives the original image. + + Returns: + PIL.Image: Hue adjusted image. + + """ + if not (-0.5 <= hue_factor <= 0.5): + raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor)) + + input_mode = image.mode + if input_mode in {'L', '1', 'I', 'F'}: + return image + h, s, v = image.convert('HSV').split() + + np_h = np.array(h, dtype=np.uint8) + # uint8 addition take cares of rotation across boundaries + with np.errstate(over='ignore'): + np_h += np.uint8(hue_factor * 255) + h = Image.fromarray(np_h, 'L') + + image = Image.merge('HSV', (h, s, v)).convert(input_mode) + return image + + +def adjust_saturation(image, saturation_factor): + """Adjusts color saturation of an image. + + Args: + image (PIL.Image): PIL Image to be adjusted. + saturation_factor (float): How much to adjust the saturation. 0 will + give a black and white image, 1 will give the original image while + 2 will enhance the saturation by a factor of 2. + + Returns: + PIL.Image: Saturation adjusted image. + + """ + if saturation_factor < 0: + raise ValueError('saturation_factor ({}) is not non-negative.'.format(saturation_factor)) + enhancer = ImageEnhance.Color(image) + image = enhancer.enhance(saturation_factor) + return image + + +def hflip(image): + """Horizontally flips the given PIL Image. + + Args: + img (PIL.Image): Image to be flipped. + + Returns: + PIL.Image: Horizontall flipped image. + + """ + + return image.transpose(Image.FLIP_LEFT_RIGHT) + + +def vflip(image): + """Vertically flips the given PIL Image. + + Args: + img (PIL.Image): Image to be flipped. + + Returns: + PIL.Image: Vertically flipped image. + + """ + + return image.transpose(Image.FLIP_TOP_BOTTOM) + + +def padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value): + ''' + + Parameters + ---------- + image: + A PIL image to be padded size of (target_width, target_height) + offset_height: + Number of rows of padding_values to add on top. + offset_width: + Number of columns of padding_values to add on the left. + target_height: + Height of output image. + target_width: + Width of output image. + padding_value: + value to pad + + Returns: + PIL.Image: padded image + ------- + + ''' + if offset_height < 0: + raise ValueError('offset_height must be >= 0') + if offset_width < 0: + raise ValueError('offset_width must be >= 0') + + width, height = image.size + after_padding_width = target_width - offset_width - width + after_padding_height = target_height - offset_height - height + if after_padding_height < 0: + raise ValueError('image height must be <= target - offset') + if after_padding_width < 0: + raise ValueError('image width must be <= target - offset') + + return pad( + image, padding=(offset_width, offset_height, after_padding_width, after_padding_height), + padding_value=padding_value, mode='constant' + ) + + +def rotate(image, angle, interpolation, expand, center, fill): + """Rotates the image by angle. + + Args: + img (PIL.Image): Image to be rotated. + angle (float or int): In degrees degrees counter clockwise order. + interpolation (str, optional): Interpolation method. If omitted, or if the + image has only one channel, it is set to PIL.Image.NEAREST . 
when the PIL backend is used,
+            the supported methods are:
+            - "nearest": Image.NEAREST,
+            - "bilinear": Image.BILINEAR,
+            - "bicubic": Image.BICUBIC
+        expand (bool, optional): Optional expansion flag.
+            If true, expands the output image to make it large enough to hold the entire rotated image.
+            If false or omitted, make the output image the same size as the input image.
+            Note that the expand flag assumes rotation around the center and no translation.
+        center (2-tuple, optional): Optional center of rotation.
+            Origin is the upper left corner.
+            Default is the center of the image.
+        fill (3-tuple or int): RGB pixel fill value for area outside the rotated image.
+            If int, it is used for all channels.
+
+    Returns:
+        PIL.Image: Rotated image.
+
+    """
+    c = 1 if image.mode == 'L' else 3
+    if isinstance(fill, numbers.Number):
+        fill = (fill, ) * c
+    elif not (isinstance(fill, (list, tuple)) and len(fill) == c):
+        raise ValueError(
+            'fill should be a single number or a list/tuple with length equal to the number of image channels. '
+            'But got {}'.format(fill)
+        )
+
+    return image.rotate(angle, _pil_interp_from_str[interpolation], expand, center, fillcolor=fill)
+
+
+def get_affine_matrix(center, angle, translate, scale, shear):
+
+    rot = math.radians(angle)
+    sx, sy = [math.radians(s) for s in shear]
+
+    cx, cy = center
+    tx, ty = translate
+
+    # RSS without scaling
+    a = math.cos(rot - sy) / math.cos(sy)
+    b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
+    c = math.sin(rot - sy) / math.cos(sy)
+    d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)
+
+    # Inverted rotation matrix with scale and shear
+    # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
+    matrix = [d, -b, 0.0, -c, a, 0.0]
+    matrix = [x / scale for x in matrix]
+
+    # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
+    matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
+    matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)
+
+    # Apply center translation: C * RSS^-1 * C^-1 * T^-1
+    matrix[2] += cx
+    matrix[5] += cy
+
+    return matrix
+
+
+def random_shear(image, degrees, interpolation, fill):
+
+    c = 1 if image.mode == 'L' else 3
+    if isinstance(fill, numbers.Number):
+        fill = (fill, ) * c
+    elif not (isinstance(fill, (list, tuple)) and len(fill) == c):
+        raise ValueError(
+            'fill should be a single number or a list/tuple with length equal to the number of image channels. '
+            'But got {}'.format(fill)
+        )
+
+    w, h = image.size
+    center = (w / 2.0, h / 2.0)
+    shear = [np.random.uniform(degrees[0], degrees[1]), np.random.uniform(degrees[2], degrees[3])]
+
+    interpolation = _pil_interp_from_str[interpolation]
+    matrix = get_affine_matrix(center=center, angle=0, translate=(0, 0), scale=1.0, shear=shear)
+    output_size = (w, h)
+    kwargs = {"fillcolor": fill}
+    return image.transform(output_size, Image.AFFINE, matrix, interpolation, **kwargs)
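+
+
+# A minimal worked example (illustrative only, and not part of the public API):
+# with no rotation, translation, zoom or shear, the inverse affine matrix from
+# get_affine_matrix reduces to the identity.
+def _affine_matrix_example():
+    matrix = get_affine_matrix(center=(16.0, 16.0), angle=0, translate=(0, 0), scale=1.0, shear=(0, 0))
+    print(matrix)  # [1.0, 0.0, 0.0, -0.0, 1.0, 0.0]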
+
+
+def random_shift(image, shift, interpolation, fill):
+
+    c = 1 if image.mode == 'L' else 3
+    if isinstance(fill, numbers.Number):
+        fill = (fill, ) * c
+    elif not (isinstance(fill, (list, tuple)) and len(fill) == c):
+        raise ValueError(
+            'fill should be a single number or a list/tuple with length equal to the number of image channels. '
+            'But got {}'.format(fill)
+        )
+
+    w, h = image.size
+    center = (w / 2.0, h / 2.0)
+    hrg = shift[0]
+    wrg = shift[1]
+    tx = np.random.uniform(-hrg, hrg) * h
+    ty = np.random.uniform(-wrg, wrg) * w
+    matrix = get_affine_matrix(center=center, angle=0, translate=(tx, ty), scale=1.0, shear=(0, 0))
+
+    interpolation = _pil_interp_from_str[interpolation]
+    output_size = (w, h)
+    kwargs = {"fillcolor": fill}
+    return image.transform(output_size, Image.AFFINE, matrix, interpolation, **kwargs)
+
+
+def random_zoom(image, zoom, interpolation, fill):
+
+    c = 1 if image.mode == 'L' else 3
+    if isinstance(fill, numbers.Number):
+        fill = (fill, ) * c
+    elif not (isinstance(fill, (list, tuple)) and len(fill) == c):
+        raise ValueError(
+            'fill should be a single number or a list/tuple with length equal to the number of image channels. '
+            'But got {}'.format(fill)
+        )
+    w, h = image.size
+    scale = np.random.uniform(zoom[0], zoom[1])
+    center = (w / 2.0, h / 2.0)
+
+    matrix = get_affine_matrix(center=center, angle=0, translate=(0, 0), scale=scale, shear=(0, 0))
+
+    interpolation = _pil_interp_from_str[interpolation]
+    output_size = (w, h)
+    kwargs = {"fillcolor": fill}
+    return image.transform(output_size, Image.AFFINE, matrix, interpolation, **kwargs)
+
+
+def random_affine(image, degrees, shift, zoom, shear, interpolation, fill):
+
+    c = 1 if image.mode == 'L' else 3
+    if isinstance(fill, numbers.Number):
+        fill = (fill, ) * c
+    elif not (isinstance(fill, (list, tuple)) and len(fill) == c):
+        raise ValueError(
+            'fill should be a single number or a list/tuple with length equal to the number of image channels. '
+            'But got {}'.format(fill)
+        )
+
+    w, h = image.size
+    angle = float(np.random.uniform(float(degrees[0]), float(degrees[1])))
+    center = (w / 2.0, h / 2.0)
+    if shift is not None:
+        max_dx = float(shift[0] * w)
+        max_dy = float(shift[1] * h)
+        tx = int(round(np.random.uniform(-max_dx, max_dx)))
+        ty = int(round(np.random.uniform(-max_dy, max_dy)))
+        translations = (tx, ty)
+    else:
+        translations = (0, 0)
+
+    if zoom is not None:
+        scale = float(np.random.uniform(zoom[0], zoom[1]))
+    else:
+        scale = 1.0
+
+    shear_x = shear_y = 0
+    if shear is not None:
+        shear_x = float(np.random.uniform(shear[0], shear[1]))
+        if len(shear) == 4:
+            shear_y = float(np.random.uniform(shear[2], shear[3]))
+    shear = (shear_x, shear_y)
+    matrix = get_affine_matrix(center=center, angle=angle, translate=translations, scale=scale, shear=shear)
+
+    interpolation = _pil_interp_from_str[interpolation]
+    output_size = (w, h)
+    kwargs = {"fillcolor": fill}
+    return image.transform(output_size, Image.AFFINE, matrix, interpolation, **kwargs)
diff --git a/tensorlayer/vision/load_vision_backend.py b/tensorlayer/vision/load_vision_backend.py
new file mode 100644
index 000000000..c816d3de8
--- /dev/null
+++ b/tensorlayer/vision/load_vision_backend.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, division, print_function
+from tensorlayer.backend.ops.load_backend import BACKEND
+
+if BACKEND == 'tensorflow':
+    from .tensorflow_vision import *
+elif BACKEND == 'mindspore':
+    from .mindspore_vision import *
+elif BACKEND == 'dragon':
+    pass
+elif BACKEND == 'paddle':
+    from .paddle_vision import *
+else:
+    raise NotImplementedError("This backend is not supported")
diff --git a/tensorlayer/vision/mindspore_vision.py b/tensorlayer/vision/mindspore_vision.py
new file mode 100644
index 000000000..bb8cbc9e1
--- /dev/null
+++ b/tensorlayer/vision/mindspore_vision.py
@@ -0,0 +1,625 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import mindspore as ms
+from . import functional_cv2 as F_cv2
+from . 
import functional_pil as F_pil +import mindspore.ops as P +from mindspore.numpy import std +from PIL import Image +import PIL +import numpy as np +import numbers +import random +import math + +__all__ = [ + 'central_crop', + 'to_tensor', + 'crop', + 'pad', + 'resize', + 'transpose', + 'hwc_to_chw', + 'chw_to_hwc', + 'rgb_to_hsv', + 'hsv_to_rgb', + 'rgb_to_gray', + 'adjust_brightness', + 'adjust_contrast', + 'adjust_hue', + 'adjust_saturation', + 'normalize', + 'hflip', + 'vflip', + 'padtoboundingbox', + 'standardize', + 'random_brightness', + 'random_contrast', + 'random_saturation', + 'random_hue', + 'random_crop', + 'random_resized_crop', + 'random_vflip', + 'random_hflip', + 'random_rotation', + 'random_shear', + 'random_shift', + 'random_zoom', + 'random_affine', +] + + +def _is_pil_image(image): + return isinstance(image, Image.Image) + + +def _is_tensor_image(image): + return isinstance(image, ms.Tensor) + + +def _is_numpy_image(image): + return isinstance(image, np.ndarray) and (image.ndim in {2, 3}) + + +def _get_image_size(img): + if _is_pil_image(img): + return img.size[::-1] + elif _is_numpy_image(img): + return img.shape[:2] + else: + raise TypeError("Unexpected type {}".format(type(img))) + + +def random_factor(factor, name, center=1, bound=(0, float('inf')), non_negative=True): + if isinstance(factor, numbers.Number): + if factor < 0: + raise ValueError('The input value of {} cannot be negative.'.format(name)) + factor = [center - factor, center + factor] + if non_negative: + factor[0] = max(0, factor[0]) + elif isinstance(factor, (tuple, list)) and len(factor) == 2: + if not bound[0] <= factor[0] <= factor[1] <= bound[1]: + raise ValueError( + "Please check your value range of {} is valid and " + "within the bound {}.".format(name, bound) + ) + else: + raise TypeError("Input of {} should be either a single value, or a list/tuple of " "length 2.".format(name)) + factor = np.random.uniform(factor[0], factor[1]) + return factor + + +def to_tensor(image, data_format='HWC'): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray. Got {}'.format(type(image))) + + image = np.asarray(image) + + if image.ndim == 2: + image = image[:, :, None] + + if data_format == 'CHW': + + image = np.transpose(image, (2, 0, 1)) + image = image / 255. + image = ms.Tensor(image, dtype=ms.float32) + else: + image = image / 255. + image = ms.Tensor(image, dtype=ms.float32) + + return image + + +def central_crop(image, size=None, central_fraction=None): + + if size is None and central_fraction is None: + raise ValueError('central_fraction and size can not be both None') + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + + return F_pil.center_crop(image, size, central_fraction) + + else: + + return F_cv2.center_crop(image, size, central_fraction) + + +def crop(image, offset_height, offset_width, target_height, target_width): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. 
Got {}'.format(type(image))) + + if _is_pil_image(image): + + return F_pil.crop(image, offset_height, offset_width, target_height, target_width) + + else: + + return F_cv2.crop(image, offset_height, offset_width, target_height, target_width) + + +def pad(image, padding, padding_value, mode): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.pad(image, padding, padding_value, mode) + else: + return F_cv2.pad(image, padding, padding_value, mode) + + +def resize(image, size, method): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.resize(image, size, method) + else: + return F_cv2.resize(image, size, method) + + +def transpose(image, order): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.transpose(image, order) + else: + return F_cv2.transpose(image, order) + + +def hwc_to_chw(image): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.hwc_to_chw(image) + else: + return F_cv2.hwc_to_chw(image) + + +def chw_to_hwc(image): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.chw_to_hwc(image) + else: + return F_cv2.chw_to_hwc(image) + + +def rgb_to_hsv(image): + if not (_is_pil_image(image) or isinstance(image, np.ndarray) and (image.ndim == 3)): + raise TypeError('image should be PIL Image or ndarray with dim=3. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.rgb_to_hsv(image) + else: + return F_cv2.rgb_to_hsv(image) + + +def hsv_to_rgb(image): + if not (_is_pil_image(image) or isinstance(image, np.ndarray) and (image.ndim == 3)): + raise TypeError('image should be PIL Image or ndarray with dim=3. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.hsv_to_rgb(image) + else: + return F_cv2.hsv_to_rgb(image) + + +def rgb_to_gray(image, num_output_channels): + if not (_is_pil_image(image) or isinstance(image, np.ndarray) and (image.ndim == 3)): + raise TypeError('image should be PIL Image or ndarray with dim=3. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.rgb_to_gray(image, num_output_channels) + else: + return F_cv2.rgb_to_gray(image, num_output_channels) + + +def adjust_brightness(image, brightness_factor): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_brightness(image, brightness_factor) + else: + return F_cv2.adjust_brightness(image, brightness_factor) + + +def adjust_contrast(image, contrast_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. 
Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_contrast(image, contrast_factor) + else: + return F_cv2.adjust_contrast(image, contrast_factor) + + +def adjust_hue(image, hue_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_hue(image, hue_factor) + else: + return F_cv2.adjust_hue(image, hue_factor) + + +def adjust_saturation(image, saturation_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_saturation(image, saturation_factor) + else: + return F_cv2.adjust_saturation(image, saturation_factor) + + +def hflip(image): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.hflip(image) + else: + return F_cv2.hflip(image) + + +def vflip(image): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.vflip(image) + else: + return F_cv2.vflip(image) + + +def padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value) + else: + return F_cv2.padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value) + + +def normalize(image, mean, std, data_format): + + if not _is_tensor_image(image): + if _is_pil_image(image): + image = np.asarray(image) + image = ms.Tensor(image) + + image = image.astype('float32', copy=False) + + if data_format == 'CHW': + num_channels = image.shape[0] + elif data_format == 'HWC': + num_channels = image.shape[2] + + if isinstance(mean, numbers.Number): + mean = (mean, ) * num_channels + elif isinstance(mean, (list, tuple)): + if len(mean) != num_channels: + raise ValueError("Length of mean must be 1 or equal to the number of channels({0}).".format(num_channels)) + if isinstance(std, numbers.Number): + std = (std, ) * num_channels + elif isinstance(std, (list, tuple)): + if len(std) != num_channels: + raise ValueError("Length of std must be 1 or equal to the number of channels({0}).".format(num_channels)) + + if data_format == 'CHW': + std = np.array(std).reshape((-1, 1, 1)) + mean = np.array(mean).reshape((-1, 1, 1)) + elif data_format == 'HWC': + mean = np.array(mean).reshape((1, 1, -1)) + std = np.array(std).reshape((1, 1, -1)) + + std = ms.Tensor(std, dtype=ms.float32) + mean = ms.Tensor(mean, dtype=ms.float32) + image = (image - mean) / std + + return image + + +def standardize(image): + ''' + Reference to tf.image.per_image_standardization(). + Linearly scales each image in image to have mean 0 and variance 1. 
+ ''' + if not _is_tensor_image(image): + if _is_pil_image(image): + image = np.asarray(image) + image = ms.Tensor(image) + + image = image.astype('float32', copy=False) + + num_pixels = ms.Tensor(image.size).astype('float32', copy=False) + image_mean_ops = ms.ops.ReduceMean(keep_dims=False) + image_mean = image_mean_ops(image) + image_mean = image_mean.reshape((1, 1, 1)) + stddev = std(image) + stddev = stddev.reshape((1, 1, 1)) + sqrt = P.Sqrt() + min_stddev = 1.0 / sqrt(num_pixels) + min_stddev = min_stddev.reshape((1, 1, 1)) + std_max = P.Maximum() + adjusted_stddev = std_max(stddev, min_stddev) + + return (image - image_mean) / adjusted_stddev + + +def random_brightness(image, brightness_factor): + ''' + Perform a random brightness on the input image. + Parameters + ---------- + image: + Input images to adjust random brightness + brightness_factor: + Brightness adjustment factor (default=(1, 1)). Cannot be negative. + If it is a float, the factor is uniformly chosen from the range [max(0, 1-brightness), 1+brightness]. + If it is a sequence, it should be [min, max] for the range. + + Returns: + Adjusted image. + ------- + + ''' + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + brightness_factor = random_factor(brightness_factor, name='brightness') + + if _is_pil_image(image): + return F_pil.adjust_brightness(image, brightness_factor) + else: + return F_cv2.adjust_brightness(image, brightness_factor) + + +def random_contrast(image, contrast_factor): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + contrast_factor = random_factor(contrast_factor, name='contrast') + + if _is_pil_image(image): + return F_pil.adjust_contrast(image, contrast_factor) + else: + return F_cv2.adjust_contrast(image, contrast_factor) + + +def random_saturation(image, saturation_factor): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + saturation_factor = random_factor(saturation_factor, name='saturation') + + if _is_pil_image(image): + return F_pil.adjust_saturation(image, saturation_factor) + else: + return F_cv2.adjust_saturation(image, saturation_factor) + + +def random_hue(image, hue_factor): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + hue_factor = random_factor(hue_factor, name='hue', center=0, bound=(-0.5, 0.5), non_negative=False) + + if _is_pil_image(image): + return F_pil.adjust_hue(image, hue_factor) + else: + return F_cv2.adjust_hue(image, hue_factor) + + +def random_crop(image, size, padding, pad_if_needed, fill, padding_mode): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if isinstance(size, int): + size = (size, size) + elif isinstance(size, (tuple, list)) and len(size) == 2: + size = size + else: + raise ValueError('Size should be a int or a list/tuple with length of 2. 
' 'But got {}'.format(size))
+
+    height, width = _get_image_size(image)
+    if padding is not None:
+        image = pad(image, padding, fill, padding_mode)
+
+    if pad_if_needed and height < size[0]:
+        image = pad(image, (0, size[0] - height), fill, padding_mode)
+
+    if pad_if_needed and width < size[1]:
+        image = pad(image, (size[1] - width, 0), fill, padding_mode)
+
+    height, width = _get_image_size(image)
+    target_height, target_width = size
+
+    if height < target_height or width < target_width:
+        raise ValueError(
+            'Crop size {} should be smaller than input image size {}. '.format(
+                (target_height, target_width), (height, width)
+            )
+        )
+
+    offset_height = random.randint(0, height - target_height)
+    offset_width = random.randint(0, width - target_width)
+
+    return crop(image, offset_height, offset_width, target_height, target_width)
+
+
+def random_resized_crop(image, size, scale, ratio, interpolation):
+    if not (_is_pil_image(image) or _is_numpy_image(image)):
+        raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image)))
+
+    if isinstance(size, int):
+        size = (size, size)
+    elif isinstance(size, (list, tuple)) and len(size) == 2:
+        size = size
+    else:
+        raise TypeError('Size should be an int or a list/tuple with length of 2. But got {}.'.format(size))
+    if not (isinstance(scale, (list, tuple)) and len(scale) == 2):
+        raise TypeError('Scale should be a list/tuple with length of 2. But got {}.'.format(scale))
+    if not (isinstance(ratio, (list, tuple)) and len(ratio) == 2):
+        raise TypeError('Ratio should be a list/tuple with length of 2. But got {}.'.format(ratio))
+
+    if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
+        raise ValueError("Scale and ratio should be of kind (min, max)")
+
+    def _get_param(image, scale, ratio):
+        height, width = _get_image_size(image)
+        area = height * width
+        log_ratio = tuple(math.log(x) for x in ratio)
+        for _ in range(10):
+            target_area = np.random.uniform(*scale) * area
+            aspect_ratio = math.exp(np.random.uniform(*log_ratio))
+
+            w = int(round(math.sqrt(target_area * aspect_ratio)))
+            h = int(round(math.sqrt(target_area / aspect_ratio)))
+
+            if 0 < w <= width and 0 < h <= height:
+                i = random.randint(0, height - h)
+                j = random.randint(0, width - w)
+                return i, j, h, w
+
+        # Fallback to central crop
+        in_ratio = float(width) / float(height)
+        if in_ratio < min(ratio):
+            w = width
+            h = int(round(w / min(ratio)))
+        elif in_ratio > max(ratio):
+            h = height
+            w = int(round(h * max(ratio)))
+        else:
+            # return whole image
+            w = width
+            h = height
+        i = (height - h) // 2
+        j = (width - w) // 2
+        return i, j, h, w
+
+    offset_height, offset_width, target_height, target_width = _get_param(image, scale, ratio)
+
+    image = crop(image, offset_height, offset_width, target_height, target_width)
+    image = resize(image, size, interpolation)
+
+    return image
+
+
+def random_vflip(image, prob):
+
+    if random.random() < prob:
+        return vflip(image)
+    return image
+
+
+def random_hflip(image, prob):
+
+    if random.random() < prob:
+        return hflip(image)
+    return image
+
+
+def random_rotation(image, degrees, interpolation, expand, center, fill):
+
+    if not (_is_pil_image(image) or _is_numpy_image(image)):
+        raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image)))
+
+    if isinstance(degrees, numbers.Number):
+        if degrees < 0:
+            raise ValueError('If degrees is a single number, it must be positive.'
'But got {}'.format(degrees)) + degrees = (-degrees, degrees) + elif not (isinstance(degrees, (list, tuple)) and len(degrees) == 2): + raise ValueError('If degrees is a list/tuple, it must be length of 2.' 'But got {}'.format(degrees)) + else: + if degrees[0] > degrees[1]: + raise ValueError('if degrees is a list/tuple, it should be (min, max).') + + angle = np.random.uniform(degrees[0], degrees[1]) + + if _is_pil_image(image): + return F_pil.rotate(image, angle, interpolation, expand, center, fill) + else: + return F_cv2.rotate(image, angle, interpolation, expand, center, fill) + + +def random_shear(image, degrees, interpolation, fill): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if isinstance(degrees, numbers.Number): + degrees = (-degrees, degrees, 0, 0) + elif isinstance(degrees, (list, tuple)) and (len(degrees) == 2 or len(degrees) == 4): + if len(degrees) == 2: + degrees = (degrees[0], degrees[1], 0, 0) + else: + raise ValueError( + 'degrees should be a single number or a list/tuple with length in (2 ,4).' + 'But got {}'.format(degrees) + ) + + if _is_pil_image(image): + return F_pil.random_shear(image, degrees, interpolation, fill) + else: + return F_cv2.random_shear(image, degrees, interpolation, fill) + + +def random_shift(image, shift, interpolation, fill): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if not (isinstance(shift, (tuple, list)) and len(shift) == 2): + + raise ValueError('Shift should be a list/tuple with length of 2.' 'But got {}'.format(shift)) + + if _is_pil_image(image): + return F_pil.random_shift(image, shift, interpolation, fill) + else: + return F_cv2.random_shift(image, shift, interpolation, fill) + + +def random_zoom(image, zoom, interpolation, fill): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if not (isinstance(zoom, (tuple, list)) and len(zoom) == 2): + + raise ValueError('Zoom should be a list/tuple with length of 2.' 'But got {}'.format(zoom)) + if not (0 <= zoom[0] <= zoom[1]): + + raise ValueError('Zoom values should be positive, and zoom[1] should be greater than zoom[0].') + + if _is_pil_image(image): + return F_pil.random_zoom(image, zoom, interpolation, fill) + else: + return F_cv2.random_zoom(image, zoom, interpolation, fill) + + +def random_affine(image, degrees, shift, zoom, shear, interpolation, fill): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.random_affine(image, degrees, shift, zoom, shear, interpolation, fill) + else: + return F_cv2.random_affine(image, degrees, shift, zoom, shear, interpolation, fill) diff --git a/tensorlayer/vision/paddle_vision.py b/tensorlayer/vision/paddle_vision.py new file mode 100644 index 000000000..df6fc230f --- /dev/null +++ b/tensorlayer/vision/paddle_vision.py @@ -0,0 +1,608 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import paddle +from . import functional_cv2 as F_cv2 +from . 
import functional_pil as F_pil +import sys +import math +import numbers +import warnings +import collections +import numpy as np +from PIL import Image +from numpy import sin, cos, tan +import paddle +import random + +__all__ = [ + 'central_crop', + 'to_tensor', + 'crop', + 'pad', + 'resize', + 'transpose', + 'hwc_to_chw', + 'chw_to_hwc', + 'rgb_to_hsv', + 'hsv_to_rgb', + 'rgb_to_gray', + 'adjust_brightness', + 'adjust_contrast', + 'adjust_hue', + 'adjust_saturation', + 'normalize', + 'hflip', + 'vflip', + 'padtoboundingbox', + 'standardize', + 'random_brightness', + 'random_contrast', + 'random_saturation', + 'random_hue', + 'random_crop', + 'random_resized_crop', + 'random_vflip', + 'random_hflip', + 'random_rotation', + 'random_shear', + 'random_shift', + 'random_zoom', + 'random_affine', +] + + +def _is_pil_image(img): + return isinstance(img, Image.Image) + + +def _is_tensor_image(img): + return isinstance(img, paddle.Tensor) + + +def _is_numpy_image(img): + return isinstance(img, np.ndarray) and (img.ndim in {2, 3}) + + +def to_tensor(img, data_format='HWC'): + + return paddle.vision.functional.to_tensor(img, data_format=data_format) + + +def _get_image_size(img): + if _is_pil_image(img): + return img.size[::-1] + elif _is_numpy_image(img): + return img.shape[:2] + else: + raise TypeError("Unexpected type {}".format(type(img))) + + +def random_factor(factor, name, center=1, bound=(0, float('inf')), non_negative=True): + if isinstance(factor, numbers.Number): + if factor < 0: + raise ValueError('The input value of {} cannot be negative.'.format(name)) + factor = [center - factor, center + factor] + if non_negative: + factor[0] = max(0, factor[0]) + elif isinstance(factor, (tuple, list)) and len(factor) == 2: + if not bound[0] <= factor[0] <= factor[1] <= bound[1]: + raise ValueError( + "Please check your value range of {} is valid and " + "within the bound {}.".format(name, bound) + ) + else: + raise TypeError("Input of {} should be either a single value, or a list/tuple of " "length 2.".format(name)) + factor = np.random.uniform(factor[0], factor[1]) + return factor + + +def central_crop(image, size=None, central_fraction=None): + + if size is None and central_fraction is None: + raise ValueError('central_fraction and size can not be both None') + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + + return F_pil.center_crop(image, size, central_fraction) + + else: + + return F_cv2.center_crop(image, size, central_fraction) + + +def crop(image, offset_height, offset_width, target_height, target_width): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + + return F_pil.crop(image, offset_height, offset_width, target_height, target_width) + + else: + + return F_cv2.crop(image, offset_height, offset_width, target_height, target_width) + + +def pad(image, padding, padding_value, mode): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. 
Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.pad(image, padding, padding_value, mode) + else: + return F_cv2.pad(image, padding, padding_value, mode) + + +def resize(image, size, method): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.resize(image, size, method) + else: + return F_cv2.resize(image, size, method) + + +def transpose(image, order): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.transpose(image, order) + else: + return F_cv2.transpose(image, order) + + +def hwc_to_chw(image): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.hwc_to_chw(image) + else: + return F_cv2.hwc_to_chw(image) + + +def chw_to_hwc(image): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.chw_to_hwc(image) + else: + return F_cv2.chw_to_hwc(image) + + +def rgb_to_hsv(image): + + if not (_is_pil_image(image) or isinstance(image, np.ndarray) and (image.ndim == 3)): + raise TypeError('image should be PIL Image or ndarray with dim=3. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.rgb_to_hsv(image) + else: + return F_cv2.rgb_to_hsv(image) + + +def hsv_to_rgb(image): + if not (_is_pil_image(image) or isinstance(image, np.ndarray) and (image.ndim == 3)): + raise TypeError('image should be PIL Image or ndarray with dim=3. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.hsv_to_rgb(image) + else: + return F_cv2.hsv_to_rgb(image) + + +def rgb_to_gray(image, num_output_channels): + if not (_is_pil_image(image) or isinstance(image, np.ndarray) and (image.ndim == 3)): + raise TypeError('image should be PIL Image or ndarray with dim=3. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.rgb_to_gray(image, num_output_channels) + else: + return F_cv2.rgb_to_gray(image, num_output_channels) + + +def adjust_brightness(image, brightness_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_brightness(image, brightness_factor) + else: + return F_cv2.adjust_brightness(image, brightness_factor) + + +def adjust_contrast(image, contrast_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_contrast(image, contrast_factor) + else: + return F_cv2.adjust_contrast(image, contrast_factor) + + +def adjust_hue(image, hue_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. 
Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_hue(image, hue_factor) + else: + return F_cv2.adjust_hue(image, hue_factor) + + +def adjust_saturation(image, saturation_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_saturation(image, saturation_factor) + else: + return F_cv2.adjust_saturation(image, saturation_factor) + + +def hflip(image): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.hflip(image) + else: + return F_cv2.hflip(image) + + +def vflip(image): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.vflip(image) + else: + return F_cv2.vflip(image) + + +def padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value) + else: + return F_cv2.padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value) + + +def normalize(image, mean, std, data_format): + + if not _is_tensor_image(image): + if _is_pil_image(image): + image = np.asarray(image) + image = paddle.to_tensor(image) + + image = image.astype('float32') + + if data_format == 'CHW': + num_channels = image.shape[0] + elif data_format == 'HWC': + num_channels = image.shape[2] + + if isinstance(mean, numbers.Number): + mean = (mean, ) * num_channels + elif isinstance(mean, (list, tuple)): + if len(mean) != num_channels: + raise ValueError("Length of mean must be 1 or equal to the number of channels({0}).".format(num_channels)) + if isinstance(std, numbers.Number): + std = (std, ) * num_channels + elif isinstance(std, (list, tuple)): + if len(std) != num_channels: + raise ValueError("Length of std must be 1 or equal to the number of channels({0}).".format(num_channels)) + if data_format == 'CHW': + std = np.array(std).reshape((-1, 1, 1)) + mean = np.array(mean).reshape((-1, 1, 1)) + elif data_format == 'HWC': + mean = np.array(mean).reshape((1, 1, -1)) + std = np.array(std).reshape((1, 1, -1)) + + mean = paddle.to_tensor(mean).astype('float32') + std = paddle.to_tensor(std).astype('float32') + + return (image - mean) / std + + +def standardize(image): + ''' + Reference to tf.image.per_image_standardization(). + Linearly scales each image in image to have mean 0 and variance 1. + ''' + if not _is_tensor_image(image): + if _is_pil_image(image): + image = np.asarray(image) + image = paddle.to_tensor(image) + + image = image.astype('float32') + num_pixels = paddle.to_tensor(image.size, dtype='float32') + image_mean = paddle.mean(image) + + stddev = paddle.std(image) + min_stddev = 1.0 / paddle.sqrt(num_pixels) + adjusted_stddev = paddle.maximum(stddev, min_stddev) + + return (image - image_mean) / adjusted_stddev + + +def random_brightness(image, brightness_factor): + ''' + Perform a random brightness on the input image. 
+ Parameters + ---------- + image: + Input images to adjust random brightness + brightness_factor: + Brightness adjustment factor (default=(1, 1)). Cannot be negative. + If it is a float, the factor is uniformly chosen from the range [max(0, 1-brightness), 1+brightness]. + If it is a sequence, it should be [min, max] for the range. + + Returns: + Adjusted image. + ------- + + ''' + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + brightness_factor = random_factor(brightness_factor, name='brightness') + + if _is_pil_image(image): + return F_pil.adjust_brightness(image, brightness_factor) + else: + return F_cv2.adjust_brightness(image, brightness_factor) + + +def random_contrast(image, contrast_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + contrast_factor = random_factor(contrast_factor, name='contrast') + + if _is_pil_image(image): + return F_pil.adjust_contrast(image, contrast_factor) + else: + return F_cv2.adjust_contrast(image, contrast_factor) + + +def random_saturation(image, saturation_factor): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + saturation_factor = random_factor(saturation_factor, name='saturation') + + if _is_pil_image(image): + return F_pil.adjust_saturation(image, saturation_factor) + else: + return F_cv2.adjust_saturation(image, saturation_factor) + + +def random_hue(image, hue_factor): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + hue_factor = random_factor(hue_factor, name='hue', center=0, bound=(-0.5, 0.5), non_negative=False) + + if _is_pil_image(image): + return F_pil.adjust_hue(image, hue_factor) + else: + return F_cv2.adjust_hue(image, hue_factor) + + +def random_crop(image, size, padding, pad_if_needed, fill, padding_mode): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if isinstance(size, int): + size = (size, size) + elif isinstance(size, (tuple, list)) and len(size) == 2: + size = size + else: + raise ValueError('Size should be a int or a list/tuple with length of 2. ' 'But got {}'.format(size)) + + if padding is not None: + + image = pad(image, padding, fill, padding_mode) + + h, w = _get_image_size(image) + + # pad the width if needed + if pad_if_needed and w < size[1]: + image = pad(image, (size[1] - w, 0), fill, padding_mode) + # pad the height if needed + if pad_if_needed and h < size[0]: + image = pad(image, (0, size[0] - h), fill, padding_mode) + + h, w = _get_image_size(image) + target_height, target_width = size + + if h < target_height or w < target_width: + raise ValueError( + 'Crop size {} should be smaller than input image size {}. 
'.format((target_height, target_width), (h, w))
+        )
+
+    offset_height = random.randint(0, h - target_height)
+    offset_width = random.randint(0, w - target_width)
+
+    return crop(image, offset_height, offset_width, target_height, target_width)
+
+
+def random_resized_crop(image, size, scale, ratio, interpolation):
+
+    if not (_is_pil_image(image) or _is_numpy_image(image)):
+        raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image)))
+
+    if isinstance(size, int):
+        size = (size, size)
+    elif isinstance(size, (list, tuple)) and len(size) == 2:
+        size = size
+    else:
+        raise TypeError('Size should be an int or a list/tuple with length of 2. But got {}.'.format(size))
+    if not (isinstance(scale, (list, tuple)) and len(scale) == 2):
+        raise TypeError('Scale should be a list/tuple with length of 2. But got {}.'.format(scale))
+    if not (isinstance(ratio, (list, tuple)) and len(ratio) == 2):
+        raise TypeError('Ratio should be a list/tuple with length of 2. But got {}.'.format(ratio))
+
+    if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
+        raise ValueError("Scale and ratio should be of kind (min, max)")
+
+    def _get_param(image, scale, ratio):
+        height, width = _get_image_size(image)
+        area = height * width
+        log_ratio = tuple(math.log(x) for x in ratio)
+        for _ in range(10):
+            target_area = np.random.uniform(*scale) * area
+            aspect_ratio = math.exp(np.random.uniform(*log_ratio))
+
+            w = int(round(math.sqrt(target_area * aspect_ratio)))
+            h = int(round(math.sqrt(target_area / aspect_ratio)))
+
+            if 0 < w <= width and 0 < h <= height:
+                i = random.randint(0, height - h)
+                j = random.randint(0, width - w)
+                return i, j, h, w
+
+        # Fallback to central crop
+        in_ratio = float(width) / float(height)
+        if in_ratio < min(ratio):
+            w = width
+            h = int(round(w / min(ratio)))
+        elif in_ratio > max(ratio):
+            h = height
+            w = int(round(h * max(ratio)))
+        else:
+            # return whole image
+            w = width
+            h = height
+        i = (height - h) // 2
+        j = (width - w) // 2
+        return i, j, h, w
+
+    offset_height, offset_width, target_height, target_width = _get_param(image, scale, ratio)
+
+    image = crop(image, offset_height, offset_width, target_height, target_width)
+    image = resize(image, size, interpolation)
+
+    return image
+
+
+def random_vflip(image, prob):
+
+    if random.random() < prob:
+        return vflip(image)
+    return image
+
+
+def random_hflip(image, prob):
+
+    if random.random() < prob:
+        return hflip(image)
+    return image
+
+
+def random_rotation(image, degrees, interpolation, expand, center, fill):
+
+    if not (_is_pil_image(image) or _is_numpy_image(image)):
+        raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image)))
+
+    if isinstance(degrees, numbers.Number):
+        if degrees < 0:
+            raise ValueError('If degrees is a single number, it must be positive. But got {}'.format(degrees))
+        degrees = (-degrees, degrees)
+    elif not (isinstance(degrees, (list, tuple)) and len(degrees) == 2):
+        raise ValueError('If degrees is a list/tuple, it must be length of 2.'
'But got {}'.format(degrees)) + else: + if degrees[0] > degrees[1]: + raise ValueError('if degrees is a list/tuple, it should be (min, max).') + + angle = np.random.uniform(degrees[0], degrees[1]) + + if _is_pil_image(image): + return F_pil.rotate(image, angle, interpolation, expand, center, fill) + else: + return F_cv2.rotate(image, angle, interpolation, expand, center, fill) + + +def random_shear(image, degrees, interpolation, fill): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if isinstance(degrees, numbers.Number): + degrees = (-degrees, degrees, 0, 0) + elif isinstance(degrees, (list, tuple)) and (len(degrees) == 2 or len(degrees) == 4): + if len(degrees) == 2: + degrees = (degrees[0], degrees[1], 0, 0) + else: + raise ValueError( + 'degrees should be a single number or a list/tuple with length in (2 ,4).' + 'But got {}'.format(degrees) + ) + + if _is_pil_image(image): + return F_pil.random_shear(image, degrees, interpolation, fill) + else: + return F_cv2.random_shear(image, degrees, interpolation, fill) + + +def random_shift(image, shift, interpolation, fill): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if not (isinstance(shift, (tuple, list)) and len(shift) == 2): + + raise ValueError('Shift should be a list/tuple with length of 2.' 'But got {}'.format(shift)) + + if _is_pil_image(image): + return F_pil.random_shift(image, shift, interpolation, fill) + else: + return F_cv2.random_shift(image, shift, interpolation, fill) + + +def random_zoom(image, zoom, interpolation, fill): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if not (isinstance(zoom, (tuple, list)) and len(zoom) == 2): + + raise ValueError('Zoom should be a list/tuple with length of 2.' 'But got {}'.format(zoom)) + if not (0 <= zoom[0] <= zoom[1]): + + raise ValueError('Zoom values should be positive, and zoom[1] should be greater than zoom[0].') + + if _is_pil_image(image): + return F_pil.random_zoom(image, zoom, interpolation, fill) + else: + return F_cv2.random_zoom(image, zoom, interpolation, fill) + + +def random_affine(image, degrees, shift, zoom, shear, interpolation, fill): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. 
Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.random_affine(image, degrees, shift, zoom, shear, interpolation, fill) + else: + return F_cv2.random_affine(image, degrees, shift, zoom, shear, interpolation, fill) diff --git a/tensorlayer/vision/tensorflow_vision.py b/tensorlayer/vision/tensorflow_vision.py new file mode 100644 index 000000000..95609a23a --- /dev/null +++ b/tensorlayer/vision/tensorflow_vision.py @@ -0,0 +1,1393 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import tensorflow as tf +import numpy as np +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import array_ops, random_ops +from tensorflow.python.framework import ops +from tensorflow.python.ops.image_ops_impl import _AssertAtLeast3DImage +from tensorflow.python.framework import dtypes +from tensorflow.python.ops.image_ops_impl import convert_image_dtype +import numbers +import PIL +from PIL import Image +import math +import scipy +from scipy import ndimage +__all__ = [ + 'central_crop', + 'to_tensor', + 'crop', + 'pad', + 'resize', + 'transpose', + 'hwc_to_chw', + 'chw_to_hwc', + 'rgb_to_hsv', + 'hsv_to_rgb', + 'rgb_to_gray', + 'adjust_brightness', + 'adjust_contrast', + 'adjust_hue', + 'adjust_saturation', + 'normalize', + 'hflip', + 'vflip', + 'padtoboundingbox', + 'standardize', + 'random_brightness', + 'random_contrast', + 'random_saturation', + 'random_hue', + 'random_crop', + 'random_resized_crop', + 'random_vflip', + 'random_hflip', + 'random_rotation', + 'random_shear', + 'random_shift', + 'random_zoom', + 'random_affine', +] + + +def _is_pil_image(image): + return isinstance(image, Image.Image) + + +def _is_numpy_image(image): + return isinstance(image, np.ndarray) and (image.ndim in {2, 3}) + + +def _get_image_size(image): + image_shape = image.get_shape() + if image_shape.ndims == 3: + height, width, channels = image_shape + return height, width + elif image_shape.ndims == 4: + batch, height, width, channels = image_shape + return height, width + + +def random_factor(factor, name, center=1, bound=(0, float('inf')), non_negative=True): + if isinstance(factor, numbers.Number): + if factor < 0: + raise ValueError('The input value of {} cannot be negative.'.format(name)) + factor = [center - factor, center + factor] + if non_negative: + factor[0] = max(0, factor[0]) + elif isinstance(factor, (tuple, list)) and len(factor) == 2: + if not bound[0] <= factor[0] <= factor[1] <= bound[1]: + raise ValueError( + "Please check your value range of {} is valid and " + "within the bound {}.".format(name, bound) + ) + else: + raise TypeError("Input of {} should be either a single value, or a list/tuple of " "length 2.".format(name)) + factor = np.random.uniform(factor[0], factor[1]) + return factor + + +def central_crop(image, size=None, central_fraction=None): + ''' + + Parameters + ---------- + image : + input Either a 3-D float Tensor of shape [height, width, depth], + or a 4-D Tensor of shape [batch_size, height, width, depth]. + central_fraction : + float (0, 1], fraction of size to crop + size: + size (Union[int, sequence]) – The output size of the cropped image. If size is an integer, a square crop of size (size, size) is returned. + If size is a sequence of length 2, it should be (height, width). + Returns : + 3-D / 4-D float Tensor, as per the input. 
+ ------- + + ''' + if size is None and central_fraction is None: + raise ValueError('central_fraction and size can not be both None') + + if size is not None: + if not isinstance(size, (int, list, tuple)) or (isinstance(size, (list, tuple)) and len(size) != 2): + raise ValueError( + "Size should be a single integer or a list/tuple (h, w) of length 2.But" + "got {}.".format(type(size)) + ) + if isinstance(size, int): + target_height = size + target_width = size + else: + target_height = size[0] + target_width = size[1] + image = ops.convert_to_tensor(image, name='image') + rank = image.get_shape().ndims + if rank != 3 and rank != 4: + raise ValueError( + '`image` should either be a Tensor with rank = 3 or ' + 'rank = 4. Had rank = {}.'.format(rank) + ) + + def _get_dim(tensor, idx): + static_shape = tensor.get_shape().dims[idx].value + if static_shape is not None: + return static_shape, False + return array_ops.shape(tensor)[idx], True + + if rank == 3: + img_h, dynamic_h = _get_dim(image, 0) + img_w, dynamic_w = _get_dim(image, 1) + img_d = image.get_shape()[2] + else: + img_bs = image.get_shape()[0] + img_h, dynamic_h = _get_dim(image, 1) + img_w, dynamic_w = _get_dim(image, 2) + img_d = image.get_shape()[3] + + bbox_h_size = target_height + bbox_w_size = target_width + + if dynamic_h: + img_hd = math_ops.cast(img_h, dtypes.float64) + target_height = math_ops.cast(target_height, dtypes.float64) + bbox_h_start = math_ops.cast((img_hd - target_height) / 2, dtypes.int32) + else: + img_hd = float(img_h) + target_height = float(target_height) + bbox_h_start = int((img_hd - target_height) / 2) + + if dynamic_w: + img_wd = math_ops.cast(img_w, dtypes.float64) + target_width = math_ops.cast(target_width, dtypes.float64) + bbox_w_start = math_ops.cast((img_wd - target_width) / 2, dtypes.int32) + else: + img_wd = float(img_w) + target_width = float(target_width) + bbox_w_start = int((img_wd - target_width) / 2) + + if rank == 3: + bbox_begin = array_ops.stack([bbox_h_start, bbox_w_start, 0]) + bbox_size = array_ops.stack([bbox_h_size, bbox_w_size, -1]) + else: + bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0]) + bbox_size = array_ops.stack([-1, bbox_h_size, bbox_w_size, -1]) + + image = array_ops.slice(image, bbox_begin, bbox_size) + + if rank == 3: + image.set_shape([None if dynamic_h else bbox_h_size, None if dynamic_w else bbox_w_size, img_d]) + else: + image.set_shape([img_bs, None if dynamic_h else bbox_h_size, None if dynamic_w else bbox_w_size, img_d]) + return image + + elif central_fraction is not None: + return tf.image.central_crop(image, central_fraction) + + +def to_tensor(img, data_format): + '''Converts a ``image`` to tf.Tensor. + + Parameters + ---------- + img: + Image to be converted to tensor. + data_format: + Data format of output tensor, should be 'HWC' or + 'CHW'. Default: 'HWC'. + + Returns: + Tensor: Converted image. + ------- + + ''' + if not (_is_pil_image(img) or _is_numpy_image(img)): + raise TypeError('img should be PIL Image or ndarray. 
But got {}'.format(type(img)))
+
+    if _is_pil_image(img):
+        # PIL Image
+        if img.mode == 'I':
+            image = tf.convert_to_tensor(np.array(img, np.int32, copy=False))
+        elif img.mode == 'I;16':
+            # cast and reshape do not support int16
+            image = tf.convert_to_tensor(np.array(img, np.int32, copy=False))
+        elif img.mode == 'F':
+            image = tf.convert_to_tensor(np.array(img, np.float32, copy=False))
+        elif img.mode == '1':
+            image = 255 * tf.convert_to_tensor(np.array(img, np.uint8, copy=False))
+        else:
+            image = tf.convert_to_tensor(np.array(img, copy=False))
+
+        if img.mode == 'YCbCr':
+            nchannel = 3
+        elif img.mode == 'I;16':
+            nchannel = 1
+        else:
+            nchannel = len(img.mode)
+
+        dtype = image.dtype
+        if dtype == tf.uint8:
+            image = tf.cast(image, tf.float32) / 255.
+
+        image = tf.reshape(image, shape=[img.size[1], img.size[0], nchannel])
+        if data_format == 'CHW':
+            image = tf.transpose(image, perm=[2, 0, 1])
+        return image
+    else:
+        if img.ndim == 2:
+            img = img[:, :, None]
+
+        if data_format == 'CHW':
+            img = tf.convert_to_tensor(img.transpose((2, 0, 1)))
+        else:
+            img = tf.convert_to_tensor(img)
+
+        dtype = img.dtype
+        if dtype == tf.uint8:
+            img = tf.cast(img, tf.float32) / 255.
+        return img
+
+
+def crop(image, offset_height, offset_width, target_height, target_width):
+
+    return tf.image.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width)
+
+
+def pad(image, padding, padding_value, mode):
+    '''
+
+    Parameters
+    ----------
+    image:
+        A 3-D or 4-D Tensor.
+    padding:
+        An integer or a list/tuple. If a single number is provided, pad all borders with this value.
+        If a tuple or list of 2 values is provided, pad the left and right with the first value and the top and bottom with the second value.
+        If 4 values are provided as a list or tuple, pad the (left, top, right, bottom) respectively.
+    padding_value:
+        In "CONSTANT" mode, the scalar pad value to use. Must be same type as tensor.
+    mode:
+        One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
+    Returns:
+        A padded Tensor. Has the same type as tensor.
+    -------
+
+    '''
+    image = ops.convert_to_tensor(image, name='image')
+    image_shape = image.get_shape()
+    if len(image_shape) == 3:
+        batch_size = 0
+    elif len(image_shape) == 4:
+        batch_size = image_shape[0]
+    else:
+        raise TypeError('Image must be a 3-D tensor or 4-D tensor.')
+
+    if isinstance(padding, int):
+        padding = ((padding, padding), (padding, padding))
+    elif isinstance(padding, list) or isinstance(padding, tuple):
+        if len(padding) == 2:
+            padding = ((padding[1], padding[1]), (padding[0], padding[0]))
+        elif len(padding) == 4:
+            padding = ((padding[1], padding[3]), (padding[0], padding[2]))
+        else:
+            raise ValueError('The length of padding should be 2 or 4, but got {}.'.format(len(padding)))
+    else:
+        raise TypeError('Padding should be an integer or a list/tuple, but got {}.'.format(type(padding)))
+
+    if batch_size == 0:
+        padding = (padding[0], padding[1], (0, 0))
+    else:
+        padding = ((0, 0), padding[0], padding[1], (0, 0))
+
+    return tf.pad(image, padding, mode=mode, constant_values=padding_value)
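+
+
+# Usage sketch for `pad` (illustrative only; the sample image below is an
+# assumption made for the example, not part of this module):
+# >>> import numpy as np
+# >>> img = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
+# >>> out = pad(img, padding=(4, 8), padding_value=0, mode='CONSTANT')
+# >>> out.shape  # left/right padded by 4, top/bottom by 8
+# TensorShape([240, 232, 3])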
+
+
+def resize(image, size, method):
+    '''
+
+    Parameters
+    ----------
+    image:
+        Input image to resize.
+    size:
+        The output size of the resized image.
+        If size is an integer, the smaller edge of the image will be resized to this value and
+        the image aspect ratio is preserved.
+        If size is a sequence of (height, width), this will be the desired output size.
+    method:
+        An image.ResizeMethod, or string equivalent, should be in
+        (bilinear, lanczos3, lanczos5, bicubic, gaussian, nearest, area, mitchellcubic).
+        Defaults to bilinear.
+    Returns:
+        Resized image.
+    -------
+
+    '''
+    if not (isinstance(size, int) or (isinstance(size, (list, tuple)) and len(size) == 2)):
+        raise TypeError('Size should be a single number or a list/tuple (h, w) of length 2. Got {}.'.format(size))
+    image = ops.convert_to_tensor(image)
+    orig_dtype = image.dtype
+    if orig_dtype not in [dtypes.float16, dtypes.float32]:
+        image = convert_image_dtype(image, dtypes.float32)
+
+    if image.get_shape().ndims == 3:
+        h, w, _ = image.get_shape().as_list()
+    elif image.get_shape().ndims == 4:
+        _, h, w, _ = image.get_shape().as_list()
+
+    if isinstance(size, int):
+        if (w <= h and w == size) or (h <= w and h == size):
+            size = (h, w)
+        elif w < h:
+            target_w = size
+            target_h = int(size * h / w)
+            size = (target_h, target_w)
+        else:
+            target_h = size
+            target_w = int(size * w / h)
+            size = (target_h, target_w)
+    image = tf.image.resize(image, size, method, preserve_aspect_ratio=False)
+    return convert_image_dtype(image, orig_dtype, saturate=True)
+
+
+def transpose(image, order):
+    image = ops.convert_to_tensor(image)
+    shape = image.get_shape()
+    if shape.ndims == 3 or shape.ndims is None:
+        if len(order) != 3:
+            raise ValueError('if image is 3-D tensor, order should be a list/tuple with length of 3')
+        return array_ops.transpose(image, order)
+    elif shape.ndims == 4:
+        if len(order) != 4:
+            raise ValueError('if image is 4-D tensor, order should be a list/tuple with length of 4')
+        return array_ops.transpose(image, order)
+    else:
+        raise ValueError('\'image\' must have either 3 or 4 dimensions.')
+
+
+def hwc_to_chw(image):
+
+    if (len(image.shape) == 3):
+        return transpose(image, (2, 0, 1))
+    elif (len(image.shape) == 4):
+        return transpose(image, (0, 3, 1, 2))
+    else:
+        raise ValueError('\'image\' must have either 3 or 4 dimensions.')
+
+
+def chw_to_hwc(image):
+
+    if (len(image.shape) == 3):
+        return transpose(image, (1, 2, 0))
+    elif (len(image.shape) == 4):
+        return transpose(image, (0, 2, 3, 1))
+    else:
+        raise ValueError('\'image\' must have either 3 or 4 dimensions.')
+
+
+def rgb_to_hsv(image):
+
+    return tf.image.rgb_to_hsv(image)
+
+
+def hsv_to_rgb(image):
+
+    return tf.image.hsv_to_rgb(image)
+
+
+def rgb_to_gray(image, num_output_channels):
+
+    if num_output_channels not in (1, 3):
+        raise ValueError('num_output_channels should be either 1 or 3')
+
+    image = ops.convert_to_tensor(image, name='image')
+    orig_dtype = image.dtype
+    flt_image = convert_image_dtype(image, dtypes.float32)
+    rgb_weights = [0.2989, 0.5870, 0.1140]
+    gray_float = math_ops.tensordot(flt_image, rgb_weights, [-1, -1])
+    gray_float = array_ops.expand_dims(gray_float, -1)
+    if num_output_channels == 3:
+        # concat along the channel axis; stacking would add an extra dimension
+        gray_float = array_ops.concat([gray_float, gray_float, gray_float], axis=-1)
+    return convert_image_dtype(gray_float, orig_dtype)
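+
+
+# Usage sketch for the resize/colour helpers above (illustrative only; the
+# sample image is an assumption made for the example):
+# >>> import numpy as np
+# >>> img = np.random.randint(0, 256, (100, 150, 3), dtype=np.uint8)
+# >>> resize(img, 50, method='bilinear').shape  # smaller edge -> 50, ratio kept
+# TensorShape([50, 75, 3])
+# >>> rgb_to_gray(img, num_output_channels=1).shape
+# TensorShape([100, 150, 1])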
+
+
+def adjust_brightness(image, brightness_factor):
+    '''
+    Parameters
+    ----------
+    images:
+        Input images to adjust brightness
+    brightness_factor(float): How much to adjust the brightness. Can be
+        any non negative number. 0 gives a black image, 1 gives the
+        original image while 2 increases the brightness by a factor of 2.
+    Returns:
+        adjusted images
+    -------
+    '''
+    if brightness_factor < 0:
+        raise ValueError('brightness_factor ({}) is not non-negative.'.format(brightness_factor))
+
+    image = ops.convert_to_tensor(image, name='image')
+    image = _AssertAtLeast3DImage(image)
+
+    orig_dtype = image.dtype
+    if orig_dtype not in [dtypes.float16, dtypes.float32]:
+        image = convert_image_dtype(image, dtypes.float32)
+
+    brightness_factor = math_ops.cast(brightness_factor, image.dtype)
+    image_zeros = tf.zeros_like(image)
+    adjusted = brightness_factor * image + (1.0 - brightness_factor) * image_zeros
+    adjusted = tf.clip_by_value(adjusted, clip_value_min=0, clip_value_max=1.0)
+    return convert_image_dtype(adjusted, orig_dtype, saturate=True)
+
+
+def adjust_contrast(image, contrast_factor):
+    '''
+    Parameters
+    ----------
+    images:
+        Input images to adjust contrast
+    contrast_factor(float): How much to adjust the contrast. Can be
+        any non negative number. 0 gives a gray image, 1 gives the
+        original image while 2 increases the contrast by a factor of 2.
+    Returns:
+        adjusted images
+    -------
+    '''
+    if contrast_factor < 0:
+        raise ValueError('contrast_factor ({}) is not non-negative.'.format(contrast_factor))
+
+    image = ops.convert_to_tensor(image, name='image')
+    image = _AssertAtLeast3DImage(image)
+
+    orig_dtype = image.dtype
+    if orig_dtype not in [dtypes.float16, dtypes.float32]:
+        image = convert_image_dtype(image, dtypes.float32)
+
+    contrast_factor = math_ops.cast(contrast_factor, image.dtype)
+    mean = tf.math.reduce_mean(tf.image.rgb_to_grayscale(image), keepdims=True)
+    adjusted = contrast_factor * image + (1 - contrast_factor) * mean
+    adjusted = tf.clip_by_value(adjusted, clip_value_min=0, clip_value_max=1.0)
+    return convert_image_dtype(adjusted, orig_dtype, saturate=True)
+
+
+def adjust_hue(image, hue_factor):
+    '''
+    Parameters
+    ----------
+    images(Tensor):
+        Input images to adjust hue
+    hue_factor(float): How much to shift the hue channel. Should be in
+        [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
+        HSV space in positive and negative direction respectively.
+        0 means no shift. Therefore, both -0.5 and 0.5 will give an image
+        with complementary colors while 0 gives the original image.
+    Returns(Tensor):
+        Adjusted images
+    -------
+    '''
+    if not (-0.5 <= hue_factor <= 0.5):
+        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
+
+    image = ops.convert_to_tensor(image, name='image')
+    image = _AssertAtLeast3DImage(image)
+
+    orig_dtype = image.dtype
+    if orig_dtype not in [dtypes.float16, dtypes.float32]:
+        image = convert_image_dtype(image, dtypes.float32)
+
+    hue_factor = math_ops.cast(hue_factor, image.dtype)
+    image = tf.image.rgb_to_hsv(image)
+    h, s, v = tf.split(image, num_or_size_splits=[1, 1, 1], axis=2)
+    h = (h + hue_factor) % 1.0
+    image = tf.concat((h, s, v), axis=2)
+    adjusted = tf.image.hsv_to_rgb(image)
+
+    return convert_image_dtype(adjusted, orig_dtype, saturate=True)
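+
+
+# Sketch of chaining the deterministic adjust_* ops (factor values are
+# arbitrary examples for illustration, not recommended defaults):
+# >>> import numpy as np
+# >>> img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
+# >>> out = adjust_brightness(img, 1.2)  # 20% brighter
+# >>> out = adjust_contrast(out, 0.5)    # halve the contrast
+# >>> out = adjust_hue(out, 0.1)         # small hue rotation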
+
+
+def adjust_saturation(image, saturation_factor):
+    '''
+    Parameters
+    ----------
+    images(Tensor):
+        Input images to adjust saturation
+    saturation_factor(float): How much to adjust the saturation. 0 will
+        give a black and white image, 1 will give the original image while
+        2 will enhance the saturation by a factor of 2.
+    Returns(Tensor):
+        Adjusted images
+    -------
+    '''
+    if saturation_factor < 0:
+        raise ValueError('saturation_factor ({}) is not non-negative.'.format(saturation_factor))
+
+    image = ops.convert_to_tensor(image, name='image')
+    image = _AssertAtLeast3DImage(image)
+
+    orig_dtype = image.dtype
+    if orig_dtype not in [dtypes.float16, dtypes.float32]:
+        image = convert_image_dtype(image, dtypes.float32)
+
+    saturation_factor = math_ops.cast(saturation_factor, image.dtype)
+    gray_image = tf.image.rgb_to_grayscale(image)
+    adjusted = saturation_factor * image + (1 - saturation_factor) * gray_image
+    adjusted = tf.clip_by_value(adjusted, clip_value_min=0, clip_value_max=1.0)
+    return convert_image_dtype(adjusted, orig_dtype, saturate=True)
+
+
+def hflip(image):
+    '''
+
+    Parameters
+    ----------
+    image(Tensor):
+        Input image to flip horizontally (left to right).
+
+    Returns(Tensor):
+        Flipped image.
+    -------
+
+    '''
+    return tf.image.flip_left_right(image)
+
+
+def vflip(image):
+    '''
+
+    Parameters
+    ----------
+    image(Tensor):
+        Input image to flip vertically (up to down).
+
+    Returns(Tensor):
+        Flipped image.
+    -------
+
+    '''
+    return tf.image.flip_up_down(image)
+
+
+def padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value):
+    '''
+
+    Parameters
+    ----------
+    image:
+        4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
+        of shape `[height, width, channels]`.
+    offset_height:
+        Number of rows of padding_values to add on top.
+    offset_width:
+        Number of columns of padding_values to add on the left.
+    target_height:
+        Height of output image.
+    target_width:
+        Width of output image.
+    padding_value:
+        value to pad
+
+    Returns:
+        If `image` was 4-D, a 4-D float Tensor of shape
+        `[batch, target_height, target_width, channels]`
+        If `image` was 3-D, a 3-D float Tensor of shape
+        `[target_height, target_width, channels]`
+    -------
+
+    '''
+    image = ops.convert_to_tensor(image, name='image')
+
+    if offset_height < 0:
+        raise ValueError('offset_height must be >= 0')
+    if offset_width < 0:
+        raise ValueError('offset_width must be >= 0')
+
+    image_shape = image.get_shape()
+    if image_shape.ndims == 3:
+        height, width, channels = image.get_shape()
+    elif image_shape.ndims == 4:
+        batch, height, width, channels = image.get_shape()
+    else:
+        raise ValueError('\'image\' (shape %s) must have either 3 or 4 dimensions.' % image_shape)
+
+    after_padding_width = target_width - offset_width - width
+    after_padding_height = target_height - offset_height - height
+    if after_padding_height < 0:
+        raise ValueError('image height must be <= target - offset')
+    if after_padding_width < 0:
+        raise ValueError('image width must be <= target - offset')
+
+    return pad(
+        image, padding=(offset_width, offset_height, after_padding_width, after_padding_height),
+        padding_value=padding_value, mode='constant'
+    )
+
+
+def normalize(image, mean, std, data_format):
+    '''
+    Parameters
+    ----------
+    image:
+        An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image.
+    mean:
+        List or tuple of mean values for each channel, with respect to channel order.
+    std:
+        List or tuple of standard deviations for each channel.
+    data_format:
+        Data format of the input image, either 'HWC' or 'CHW'.
+    Returns:
+        A Tensor with the same shape and dtype as image.
+ ------- + ''' + image = ops.convert_to_tensor(image, dtype=tf.float32) + image = _AssertAtLeast3DImage(image) + + if data_format == 'CHW': + num_channels = image.shape[0] + elif data_format == 'HWC': + num_channels = image.shape[2] + + if isinstance(mean, numbers.Number): + mean = (mean, ) * num_channels + elif isinstance(mean, (list, tuple)): + if len(mean) != num_channels: + raise ValueError("Length of mean must be 1 or equal to the number of channels({0}).".format(num_channels)) + if isinstance(std, numbers.Number): + std = (std, ) * num_channels + elif isinstance(std, (list, tuple)): + if len(std) != num_channels: + raise ValueError("Length of std must be 1 or equal to the number of channels({0}).".format(num_channels)) + + if data_format == 'CHW': + std = np.float32(np.array(std).reshape((-1, 1, 1))) + mean = np.float32(np.array(mean).reshape((-1, 1, 1))) + elif data_format == 'HWC': + mean = np.float32(np.array(mean).reshape((1, 1, -1))) + std = np.float32(np.array(std).reshape((1, 1, -1))) + + mean = ops.convert_to_tensor(mean, dtype=image.dtype) + std = ops.convert_to_tensor(std, dtype=image.dtype) + image -= mean + image = math_ops.divide(image, std) + return image + + +def standardize(image): + ''' + Reference to tf.image.per_image_standardization(). + Linearly scales each image in image to have mean 0 and variance 1. + + Parameters + ---------- + image: + An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image. + + Returns: + A Tensor with the same shape as image and its dtype is float32. + ------- + + ''' + image = ops.convert_to_tensor(image, name='image') + image = math_ops.cast(image, dtype=tf.float32) + return tf.image.per_image_standardization(image) + + +def random_brightness(image, brightness_factor): + ''' + Perform a random brightness on the input image. + Parameters + ---------- + image: + Input images to adjust random brightness + brightness_factor: + Brightness adjustment factor (default=(1, 1)). Cannot be negative. + If it is a float, the factor is uniformly chosen from the range [max(0, 1-brightness), 1+brightness]. + If it is a sequence, it should be [min, max] for the range. + + Returns: + Adjusted image. + ------- + + ''' + brightness_factor = random_factor(brightness_factor, name='brightness') + + return adjust_brightness(image, brightness_factor) + + +def random_contrast(image, contrast_factor): + ''' + Perform a random contrast on the input image. + Parameters + ---------- + image: + Input images to adjust random contrast + contrast_factor: + Contrast adjustment factor (default=(1, 1)). Cannot be negative. + If it is a float, the factor is uniformly chosen from the range [max(0, 1-contrast), 1+contrast]. + If it is a sequence, it should be [min, max] for the range. + + Returns: + Adjusted image. + ------- + + ''' + contrast_factor = random_factor(contrast_factor, name='contrast') + + return adjust_contrast(image, contrast_factor) + + +def random_saturation(image, saturation_factor): + ''' + Perform a random saturation on the input image. + Parameters + ---------- + image: + Input images to adjust random saturation + saturation_factor: + Saturation adjustment factor (default=(1, 1)). Cannot be negative. + If it is a float, the factor is uniformly chosen from the range [max(0, 1-saturation), 1+saturation]. + If it is a sequence, it should be [min, max] for the range. + + Returns: + Adjusted image. 
+    -------
+
+    '''
+
+    saturation_factor = random_factor(saturation_factor, name='saturation')
+
+    return adjust_saturation(image, saturation_factor)
+
+
+def random_hue(image, hue_factor):
+    '''
+    Perform a random hue shift on the input image.
+    Parameters
+    ----------
+    image:
+        Input image to adjust random hue.
+    hue_factor:
+        Hue adjustment factor (default=(0, 0)).
+        If it is a float, the factor is uniformly chosen from the range [-hue_factor, hue_factor];
+        it should be in [0, 0.5].
+        If it is a sequence, it should be [min, max] within the range [-0.5, 0.5].
+
+    Returns:
+        Adjusted image.
+    -------
+
+    '''
+    hue_factor = random_factor(hue_factor, name='hue', center=0, bound=(-0.5, 0.5), non_negative=False)
+
+    return adjust_hue(image, hue_factor)
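+
+
+# Sketch of a simple colour-jitter pipeline built from the random_* ops above
+# (factor values are arbitrary examples for illustration):
+# >>> import numpy as np
+# >>> img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
+# >>> out = random_brightness(img, 0.2)       # factor drawn from [0.8, 1.2]
+# >>> out = random_contrast(out, (0.5, 1.5))  # explicit [min, max] range
+# >>> out = random_hue(out, 0.1)              # factor drawn from [-0.1, 0.1]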
+
+
+def random_crop(image, size, padding, pad_if_needed, fill, padding_mode):
+    '''
+
+    Parameters
+    ----------
+    image:
+        Input image to crop and pad if needed.
+    size:
+        Desired output size of the crop. If size is an int instead of sequence like (h, w),
+        a square crop (size, size) is made. If provided a sequence of length 1,
+        it will be interpreted as (size[0], size[0]).
+    padding:
+        Optional, padding on each border of the image. Default is None.
+        If a single int is provided this is used to pad all borders.
+        If sequence of length 2 is provided this is the padding on left/right and top/bottom respectively.
+        If a sequence of length 4 is provided this is the padding for the left, top, right and bottom borders respectively.
+    pad_if_needed:
+        It will pad the image if smaller than the desired size to avoid raising an exception.
+        Since cropping is done after padding, the padding seems to be done at a random offset.
+    fill:
+        Pixel fill value for constant fill. Default is 0.
+    padding_mode:
+        Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
+
+    Returns:
+        Cropped image.
+    -------
+
+    '''
+    image = ops.convert_to_tensor(image, name='image')
+    _AssertAtLeast3DImage(image)
+
+    if isinstance(size, int):
+        size = (size, size)
+    elif isinstance(size, (tuple, list)) and len(size) == 2:
+        size = size
+    else:
+        raise ValueError('Size should be an int or a list/tuple with length of 2. But got {}'.format(size))
+
+    size = ops.convert_to_tensor(size, dtype=dtypes.int32, name='size')
+    if padding is not None:
+        image = pad(image, padding, fill, padding_mode)
+
+    image_shape = image.get_shape()
+    if image_shape.ndims == 3:
+        height, width, channels = image_shape
+    elif image_shape.ndims == 4:
+        batch, height, width, channels = image_shape
+
+    if pad_if_needed and height < size[0]:
+        image = pad(image, (0, size[0] - height), fill, padding_mode)
+    if pad_if_needed and width < size[1]:
+        image = pad(image, (size[1] - width, 0), fill, padding_mode)
+
+    image_shape = image.get_shape()
+    if image_shape.ndims == 3:
+        height, width, channels = image_shape
+    elif image_shape.ndims == 4:
+        batch, height, width, channels = image_shape
+
+    target_height, target_width = size
+    if height < target_height or width < target_width:
+        raise ValueError(
+            'Crop size {} should be smaller than input image size {}. '.format(
+                (target_height, target_width), (height, width)
+            )
+        )
+
+    if target_height == height and target_width == width:
+        return crop(image, 0, 0, target_height, target_width)
+
+    offset_height = random_ops.random_uniform([], minval=0, maxval=height - target_height + 1, dtype=size.dtype)
+    offset_width = random_ops.random_uniform([], minval=0, maxval=width - target_width + 1, dtype=size.dtype)
+
+    return crop(image, offset_height, offset_width, target_height, target_width)
+
+
+def random_resized_crop(image, size, scale, ratio, interpolation):
+    '''Crop the given image to random size and aspect ratio.
+
+    Parameters
+    ----------
+    image:
+        4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels].
+    size:
+        Target size of output image, with (height, width) shape. If size is int, target size will be (size, size).
+    scale:
+        Range of size of the origin size cropped. Default: (0.08, 1.0)
+    ratio:
+        Range of aspect ratio of the origin aspect ratio cropped. Default: (0.75, 1.33)
+    interpolation:
+        Interpolation method. Default: 'bilinear'.
+
+    Returns:
+        Randomly cropped and resized image.
+    -------
+
+    '''
+
+    if isinstance(size, int):
+        size = (size, size)
+    elif isinstance(size, (list, tuple)) and len(size) == 2:
+        size = size
+    else:
+        raise TypeError('Size should be an int or a list/tuple with length of 2. But got {}.'.format(size))
+    if not (isinstance(scale, (list, tuple)) and len(scale) == 2):
+        raise TypeError('Scale should be a list/tuple with length of 2. But got {}.'.format(scale))
+    if not (isinstance(ratio, (list, tuple)) and len(ratio) == 2):
+        raise TypeError('Ratio should be a list/tuple with length of 2. But got {}.'.format(ratio))
+
+    if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
+        raise ValueError("Scale and ratio should be of kind (min, max)")
+    image = ops.convert_to_tensor(image, name='image')
+    image = _AssertAtLeast3DImage(image)
+
+    def get_param(image, scale, ratio):
+        height, width = _get_image_size(image)
+        area = math_ops.cast(height * width, dtype=dtypes.float32)
+        ratio = ops.convert_to_tensor(ratio, dtype=dtypes.float32)
+        log_ratio = math_ops.log(ratio)
+        for _ in range(10):
+            target_area = area * random_ops.random_uniform([], minval=scale[0], maxval=scale[1], dtype=dtypes.float32)
+            aspect_ratio = math_ops.exp(
+                random_ops.random_uniform([], minval=log_ratio[0], maxval=log_ratio[1], dtype=dtypes.float32)
+            )
+
+            target_width = math_ops.cast(math_ops.round(math_ops.sqrt(target_area * aspect_ratio)), dtypes.int32)
+            target_height = math_ops.cast(math_ops.round(math_ops.sqrt(target_area / aspect_ratio)), dtypes.int32)
+
+            if 0 < target_width <= width and 0 < target_height <= height:
+                offset_height = random_ops.random_uniform(
+                    [], minval=0, maxval=height - target_height + 1, dtype=dtypes.int32
+                )
+                offset_width = random_ops.random_uniform(
+                    [], minval=0, maxval=width - target_width + 1, dtype=dtypes.int32
+                )
+                return offset_height, offset_width, target_height, target_width
+
+        # Fallback to central crop
+        height = ops.convert_to_tensor(height, dtype=dtypes.float32)
+        width = ops.convert_to_tensor(width, dtype=dtypes.float32)
+        in_ratio = width / height
+        if in_ratio < ratio[0]:
+            target_width = width
+            target_height = math_ops.cast(math_ops.round(target_width / ratio[0]), dtypes.int32)
+        elif in_ratio > ratio[1]:
+            target_height = height
+            target_width = math_ops.cast(math_ops.round(target_height / ratio[1]), dtypes.int32)
+        else:
+            # return whole image
+            target_height = height
+            target_width = width
+        offset_height = (height - target_height) // 2
+        offset_width = (width - target_width) // 2
+        return offset_height, offset_width, target_height, target_width
+
+    offset_height, offset_width, target_height, target_width = get_param(image, scale, ratio)
+    image = crop(image, offset_height, offset_width, target_height, target_width)
+    image = resize(image, size, interpolation)
+    return image
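+
+
+# Illustrative call of `random_resized_crop` with torchvision-style scale/ratio
+# values (common choices, not values mandated by this module):
+# >>> import numpy as np
+# >>> img = np.random.randint(0, 256, (100, 150, 3), dtype=np.uint8)
+# >>> out = random_resized_crop(img, size=64, scale=(0.08, 1.0),
+# ...                           ratio=(3. / 4., 4. / 3.), interpolation='bilinear')
+# >>> out.shape
+# TensorShape([64, 64, 3])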
target_width) // 2 + return offset_height, offset_width, target_height, target_width + + offset_height, offset_width, target_heigth, target_width = get_param(image, scale, ratio) + image = crop(image, offset_height, offset_width, target_heigth, target_width) + image = resize(image, size, interpolation) + return image + + +def random_vflip(image, prob): + '''Vertically flip the input image randomly with a given probability. + + Parameters + ---------- + image: + 4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels]. + prob: + probability of the image being flipped. Default value is 0.5 + Returns: + A tensor of the same type and shape as image. + ------- + + ''' + image = ops.convert_to_tensor(image, name='image') + image = _AssertAtLeast3DImage(image) + random_prob = random_ops.random_uniform([], minval=0, maxval=1.0, dtype=dtypes.float32) + flip_flag = math_ops.less(random_prob, prob) + if flip_flag: + return vflip(image) + return image + + +def random_hflip(image, prob): + '''horizontally flip the input image randomly with a given probability. + + Parameters + ---------- + image: + 4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels]. + prob: + probability of the image being flipped. Default value is 0.5 + Returns: + A tensor of the same type and shape as image. + ------- + + ''' + image = ops.convert_to_tensor(image, name='image') + image = _AssertAtLeast3DImage(image) + random_prob = random_ops.random_uniform([], minval=0, maxval=1.0, dtype=dtypes.float32) + flip_flag = math_ops.less(random_prob, prob) + if flip_flag: + return hflip(image) + return image + + +def random_rotation(image, degrees, interpolation, expand, center, fill): + '''Rotate the image by angle. + + Parameters + ---------- + image: + Input tensor. Must be 3D. + degrees: + Range of degrees to select from.If degrees is a number instead of sequence like (min, max), the range of degrees + will be (-degrees, +degrees). + interpolation: + Points outside the boundaries of the input are filled according to the given mode + (one of {'nearest', 'bilinear'}). + expand: + Optional expansion flag. + If true, expands the output to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the input image. + Note that the expand flag assumes rotation around the center and no translation. + center: + Optional center of rotation, (x, y). Origin is the upper left corner. + Default is the center of the image. + fill: + Pixel fill value for the area outside the rotated image. + Default is ``0``. If given a number, the value is used for all bands respectively. + + Returns: + Rotated image tensor. + ------- + + ''' + if isinstance(image, (tf.Tensor, np.ndarray)) and len(image.shape) == 3: + image = np.asarray(image) + else: + 'Image should be a 3d tensor or np.ndarray.' + h, w, c = image.shape[0], image.shape[1], image.shape[2] + + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number, it must be positive.' 'But got {}'.format(degrees)) + degrees = (-degrees, degrees) + elif not (isinstance(degrees, (list, tuple)) and len(degrees) == 2): + raise ValueError('If degrees is a list/tuple, it must be length of 2.' 
+    else:
+        if degrees[0] > degrees[1]:
+            raise ValueError('If degrees is a list/tuple, it should be (min, max).')
+
+    if isinstance(fill, numbers.Number):
+        fill = (fill, ) * c
+    elif not (isinstance(fill, (list, tuple)) and len(fill) == c):
+        raise ValueError(
+            'fill should be a single number or a list/tuple with length equal to the number of image channels. '
+            'But got {}'.format(fill)
+        )
+
+    if interpolation not in ('nearest', 'bilinear'):
+        raise ValueError("Interpolation only supports 'nearest' and 'bilinear'.")
+
+    orig_dtype = image.dtype
+    image = np.asarray(image, dtype=float)
+    theta = np.random.uniform(degrees[0], degrees[1])
+    angle = -math.radians(theta)
+    rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
+
+    if center is None:
+        rotn_center = (w / 2.0, h / 2.0)
+    else:
+        rotn_center = center
+
+    matrix = [
+        round(math.cos(angle), 15),
+        round(math.sin(angle), 15),
+        0.0,
+        round(-math.sin(angle), 15),
+        round(math.cos(angle), 15),
+        0.0,
+    ]
+
+    def transform(x, y, matrix):
+        (a, b, c, d, e, f) = matrix
+        return a * x + b * y + c, d * x + e * y + f
+
+    matrix[2], matrix[5] = transform(-rotn_center[0] - 0, -rotn_center[1] - 0, matrix)
+    matrix[2] += rotn_center[0]
+    matrix[5] += rotn_center[1]
+
+    if expand:
+        # calculate output size
+        xx = []
+        yy = []
+        for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
+            x, y = transform(x, y, matrix)
+            xx.append(x)
+            yy.append(y)
+        nw = math.ceil(max(xx)) - math.floor(min(xx))
+        nh = math.ceil(max(yy)) - math.floor(min(yy))
+        matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix)
+        w, h = nw, nh
+
+    image = np.rollaxis(image, 2, 0)
+    dummy = np.ones((1, image.shape[1], image.shape[2]), dtype=image.dtype)
+    image = np.concatenate((image, dummy), axis=0)
+    final_offset = np.array([matrix[5], matrix[2]])
+
+    channel_images = [
+        ndimage.interpolation.affine_transform(
+            x_channel, rotation_matrix, final_offset, output_shape=(h, w), order=3, mode='constant', cval=0
+        ) for x_channel in image
+    ]
+    image = np.stack(channel_images, axis=0)
+    image = np.rollaxis(image, 0, 3)
+    mask = image[:, :, -1:]
+    image = image[:, :, :-1]
+    mask = np.tile(mask, (1, 1, image.shape[2]))
+    fill = np.tile(fill, (image.shape[0], image.shape[1], 1))
+    if interpolation == 'nearest':
+        mask = mask < 0.5
+        image[mask] = fill[mask]
+    else:
+        image = image * mask + (1.0 - mask) * fill
+    image = np.asarray(image, dtype=orig_dtype)
+    image = ops.convert_to_tensor(image)
+    return image
+
+
+def transform_matrix_offset_center(matrix, x, y):
+    o_x = float(x) / 2 + 0.5
+    o_y = float(y) / 2 + 0.5
+    offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
+    reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
+    transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
+    return transform_matrix
+
+
+def random_shear(image, degrees, interpolation, fill):
+
+    if isinstance(image, (tf.Tensor, np.ndarray)) and len(image.shape) == 3:
+        image = np.asarray(image)
+    else:
+        raise ValueError('Image should be a 3D tensor or np.ndarray.')
+    h, w, c = image.shape[0], image.shape[1], image.shape[2]
+
+    if interpolation not in ('nearest', 'bilinear'):
+        raise ValueError("Interpolation only supports 'nearest' and 'bilinear'.")
+
+    if isinstance(degrees, numbers.Number):
+        degrees = (-degrees, degrees, 0, 0)
+    elif isinstance(degrees, (list, tuple)) and len(degrees) in (2, 4):
+        if len(degrees) == 2:
+            degrees = (degrees[0], degrees[1], 0, 0)
+    else:
+        raise ValueError(
+            'degrees should be a single number or a list/tuple with length 2 or 4. '
+            'But got {}'.format(degrees)
+        )
+
+    if isinstance(fill, numbers.Number):
+        fill = (fill, ) * c
+    elif not (isinstance(fill, (list, tuple)) and len(fill) == c):
+        raise ValueError(
+            'fill should be a single number or a list/tuple with length equal to the number of image channels. '
+            'But got {}'.format(fill)
+        )
+
+    orig_dtype = image.dtype
+    image = np.asarray(image, dtype=float)
+    shear = [np.random.uniform(degrees[0], degrees[1]), np.random.uniform(degrees[2], degrees[3])]
+    shear = np.deg2rad(shear)
+    shear_matrix = np.array(
+        [[math.cos(shear[1]), math.sin(shear[1]), 0], [math.sin(shear[0]), math.cos(shear[0]), 0], [0, 0, 1]]
+    )
+    transform_matrix = shear_matrix
+    transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
+
+    shear_matrix = transform_matrix[:2, :2]
+    offset = transform_matrix[:2, 2]
+    image = np.rollaxis(image, 2, 0)
+
+    dummy = np.ones((1, image.shape[1], image.shape[2]), dtype=image.dtype)
+    image = np.concatenate((image, dummy), axis=0)
+
+    channel_images = [
+        ndimage.interpolation.affine_transform(x_channel, shear_matrix, offset, order=3, mode='constant', cval=0)
+        for x_channel in image
+    ]
+
+    image = np.stack(channel_images, axis=0)
+    image = np.rollaxis(image, 0, 3)
+    mask = image[:, :, -1:]
+    image = image[:, :, :-1]
+    mask = np.tile(mask, (1, 1, c))
+    fill = np.tile(fill, (h, w, 1))
+    if interpolation == 'nearest':
+        mask = mask < 0.5
+        image[mask] = fill[mask]
+    else:
+        image = image * mask + (1.0 - mask) * fill
+    image = np.asarray(image, dtype=orig_dtype)
+    image = ops.convert_to_tensor(image)
+    return image
+
+
+def random_shift(image, shift, interpolation, fill):
+
+    if isinstance(image, (tf.Tensor, np.ndarray)) and len(image.shape) == 3:
+        image = np.asarray(image)
+    else:
+        raise ValueError('Image should be a 3D tensor or np.ndarray.')
+    h, w, c = image.shape[0], image.shape[1], image.shape[2]
+
+    if interpolation not in ('nearest', 'bilinear'):
+        raise ValueError("Interpolation only supports 'nearest' and 'bilinear'.")
+
+    if not (isinstance(shift, (tuple, list)) and len(shift) == 2):
+        raise ValueError('Shift should be a list/tuple of length 2. But got {}'.format(shift))
+
+    if isinstance(fill, numbers.Number):
+        fill = (fill, ) * c
+    elif not (isinstance(fill, (list, tuple)) and len(fill) == c):
+        raise ValueError(
+            'fill should be a single number or a list/tuple with length equal to the number of image channels. '
+            'But got {}'.format(fill)
+        )
+
+    orig_dtype = image.dtype
+    image = np.asarray(image, dtype=float)
+    hrg = shift[0]
+    wrg = shift[1]
+    tx = -np.random.uniform(-hrg, hrg) * w
+    ty = -np.random.uniform(-wrg, wrg) * h
+
+    shift_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
+
+    transform_matrix = transform_matrix_offset_center(shift_matrix, h, w)
+    shift_matrix = transform_matrix[:2, :2]
+    offset = transform_matrix[:2, 2]
+    image = np.rollaxis(image, 2, 0)
+
+    dummy = np.ones((1, image.shape[1], image.shape[2]), dtype=image.dtype)
+    image = np.concatenate((image, dummy), axis=0)
+
+    channel_images = [
+        ndimage.interpolation.affine_transform(x_channel, shift_matrix, offset, order=3, mode='constant', cval=0)
+        for x_channel in image
+    ]
+
+    image = np.stack(channel_images, axis=0)
+    image = np.rollaxis(image, 0, 3)
+    mask = image[:, :, -1:]
+    image = image[:, :, :-1]
+    mask = np.tile(mask, (1, 1, c))
+    fill = np.tile(fill, (h, w, 1))
+    if interpolation == 'nearest':
+        mask = mask < 0.5
+        image[mask] = fill[mask]
+    else:
+        image = image * mask + (1.0 - mask) * fill
+    image = np.asarray(image, dtype=orig_dtype)
+    image = ops.convert_to_tensor(image)
+    return image
+
+
+def random_zoom(image, zoom, interpolation, fill):
+
+    if isinstance(image, (tf.Tensor, np.ndarray)) and len(image.shape) == 3:
+        image = np.asarray(image)
+    else:
+        raise ValueError('Image should be a 3D tensor or np.ndarray.')
+    h, w, c = image.shape[0], image.shape[1], image.shape[2]
+
+    if interpolation not in ('nearest', 'bilinear'):
+        raise ValueError("Interpolation only supports 'nearest' and 'bilinear'.")
+
+    if not (isinstance(zoom, (tuple, list)) and len(zoom) == 2):
+        raise ValueError('Zoom should be a list/tuple of length 2. But got {}'.format(zoom))
+    if not (0 <= zoom[0] <= zoom[1]):
+        raise ValueError('Zoom values should be non negative, and zoom[1] should not be less than zoom[0].')
+
+    if isinstance(fill, numbers.Number):
+        fill = (fill, ) * c
+    elif not (isinstance(fill, (list, tuple)) and len(fill) == c):
+        raise ValueError(
+            'fill should be a single number or a list/tuple with length equal to the number of image channels. '
+            'But got {}'.format(fill)
+        )
+
+    orig_dtype = image.dtype
+    image = np.asarray(image, dtype=float)
+    zoom_factor = 1 / np.random.uniform(zoom[0], zoom[1])
+    zoom_matrix = np.array([[zoom_factor, 0, 0], [0, zoom_factor, 0], [0, 0, 1]])
+    transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
+    zoom_matrix = transform_matrix[:2, :2]
+    offset = transform_matrix[:2, 2]
+
+    image = np.rollaxis(image, 2, 0)
+
+    dummy = np.ones((1, image.shape[1], image.shape[2]), dtype=image.dtype)
+    image = np.concatenate((image, dummy), axis=0)
+
+    channel_images = [
+        ndimage.interpolation.affine_transform(x_channel, zoom_matrix, offset, order=3, mode='constant', cval=0)
+        for x_channel in image
+    ]
+
+    image = np.stack(channel_images, axis=0)
+    image = np.rollaxis(image, 0, 3)
+    mask = image[:, :, -1:]
+    image = image[:, :, :-1]
+    mask = np.tile(mask, (1, 1, c))
+    fill = np.tile(fill, (h, w, 1))
+    if interpolation == 'nearest':
+        mask = mask < 0.5
+        image[mask] = fill[mask]
+    else:
+        image = image * mask + (1.0 - mask) * fill
+    image = np.asarray(image, dtype=orig_dtype)
+    image = ops.convert_to_tensor(image)
+    return image
+
+
+def random_affine(image, degrees, shift, zoom, shear, interpolation, fill):
+
+    if isinstance(image, (tf.Tensor, np.ndarray)) and len(image.shape) == 3:
+        image = np.asarray(image)
+    else:
+        raise ValueError('Image should be a 3D tensor or np.ndarray.')
+    h, w, c = image.shape[0], image.shape[1], image.shape[2]
+
+    if isinstance(fill, numbers.Number):
+        fill = (fill, ) * c
+    elif not (isinstance(fill, (list, tuple)) and len(fill) == c):
+        raise ValueError(
+            'fill should be a single number or a list/tuple with length equal to the number of image channels. '
+            'But got {}'.format(fill)
+        )
+    orig_dtype = image.dtype
+    image = np.asarray(image, dtype=float)
+    theta = np.random.uniform(degrees[0], degrees[1])
+    theta = np.deg2rad(theta)
+    rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
+    transform_matrix = rotation_matrix
+
+    if shift is not None:
+        max_dx = float(shift[0] * w)
+        max_dy = float(shift[1] * h)
+        tx = -int(round(np.random.uniform(-max_dx, max_dx)))
+        ty = -int(round(np.random.uniform(-max_dy, max_dy)))
+        shift_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
+        transform_matrix = np.dot(transform_matrix, shift_matrix)
+
+    if shear is not None:
+        shear_x = shear_y = 0
+        shear_x = float(np.random.uniform(shear[0], shear[1]))
+        if len(shear) == 4:
+            shear_y = float(np.random.uniform(shear[2], shear[3]))
+        shear_x = np.deg2rad(shear_x)
+        shear_y = np.deg2rad(shear_y)
+        shear_matrix = np.array(
+            [[math.cos(shear_y), math.sin(shear_y), 0], [math.sin(shear_x), math.cos(shear_x), 0], [0, 0, 1]]
+        )
+        transform_matrix = np.dot(transform_matrix, shear_matrix)
+
+    if zoom is not None:
+        zoom = 1 / float(np.random.uniform(zoom[0], zoom[1]))
+        zoom_matrix = np.array([[zoom, 0, 0], [0, zoom, 0], [0, 0, 1]])
+
+        transform_matrix = np.dot(transform_matrix, zoom_matrix)
+
+    transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
+    image = np.rollaxis(image, 2, 0)
+    final_affine_matrix = transform_matrix[:2, :2]
+    final_offset = transform_matrix[:2, 2]
+    dummy = np.ones((1, h, w), dtype=image.dtype)
+    image = np.concatenate((image, dummy), axis=0)
+
+    channel_images = [
+        ndimage.interpolation.affine_transform(
+            x_channel, final_affine_matrix, final_offset, order=3, mode='constant', cval=0
+        ) for x_channel in image
+    ]
+
+    image = np.stack(channel_images, axis=0)
+    image = np.rollaxis(image, 0, 3)
+    mask = image[:, :, -1:]
+    image = image[:, :, :-1]
+    mask = np.tile(mask, (1, 1, c))
+    fill = np.tile(fill, (h, w, 1))
+    if interpolation == 'nearest':
+        mask = mask < 0.5
+        image[mask] = fill[mask]
+    else:
+        image = image * mask + (1.0 - mask) * fill
+    image = np.asarray(image, dtype=orig_dtype)
+    image = ops.convert_to_tensor(image)
+    return image
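
All of the NumPy-backed geometric transforms above share one compositing trick: an all-ones dummy channel is appended to the image, warped with the same affine transform, and then used as a mask to blend the fill colour into out-of-bounds pixels. The sketch below isolates that pattern on its own; the helper name `warp_with_fill` and the SciPy-based body are illustrative assumptions, not part of this patch.

import numpy as np
from scipy import ndimage

def warp_with_fill(image, matrix, offset, fill=(255.0, 0.0, 0.0)):
    # image: (H, W, C) float array; matrix/offset describe the inverse mapping
    # handed to scipy.ndimage.affine_transform, exactly as in the code above.
    h, w, c = image.shape
    stacked = np.concatenate(
        (np.rollaxis(image, 2, 0), np.ones((1, h, w), dtype=image.dtype)), axis=0
    )  # extra all-ones channel tracks which output pixels stay in bounds
    warped = np.stack(
        [ndimage.affine_transform(ch, matrix, offset, order=3, mode='constant', cval=0)
         for ch in stacked], axis=0
    )
    warped = np.rollaxis(warped, 0, 3)            # back to (H, W, C + 1)
    mask = np.tile(warped[:, :, -1:], (1, 1, c))  # ~1 inside, ~0 outside
    fill = np.tile(fill, (h, w, 1)).astype(image.dtype)
    return warped[:, :, :-1] * mask + (1.0 - mask) * fill

# Identity warp: the fill never shows, so the output matches the input.
img = np.random.rand(8, 8, 3)
out = warp_with_fill(img, np.eye(2), offset=(0.0, 0.0))
assert np.allclose(out, img, atol=1e-4)
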
diff --git a/tensorlayer/vision/transforms.py b/tensorlayer/vision/transforms.py
new file mode 100644
index 000000000..89f1ca4ef
--- /dev/null
+++ b/tensorlayer/vision/transforms.py
@@ -0,0 +1,1256 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import tensorlayer as tl
+from . import load_vision_backend as F
+import numbers
+import numpy as np
+__all__ = [
+    'Crop',
+    'CentralCrop',
+    'HsvToRgb',
+    'AdjustBrightness',
+    'AdjustContrast',
+    'AdjustHue',
+    'AdjustSaturation',
+    'FlipHorizontal',
+    'FlipVertical',
+    'RgbToGray',
+    'PadToBoundingbox',
+    'Pad',
+    'Normalize',
+    'StandardizePerImage',
+    'RandomBrightness',
+    'RandomContrast',
+    'RandomHue',
+    'RandomSaturation',
+    'RandomCrop',
+    'Resize',
+    'RgbToHsv',
+    'Transpose',
+    'RandomRotation',
+    'RandomShift',
+    'RandomShear',
+    'RandomZoom',
+    'RandomFlipVertical',
+    'RandomFlipHorizontal',
+    'HWC2CHW',
+    'CHW2HWC',
+    'ToTensor',
+    'Compose',
+    'RandomResizedCrop',
+    'RandomAffine',
+    'ColorJitter',
+]
+
+
+class ToTensor(object):
+    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
+
+    Parameters
+    ----------
+    data_format : str
+        Data format of output tensor, should be 'HWC' or 'CHW'. Default: 'HWC'.
+
+    Examples
+    ----------
+    With TensorLayer
+
+    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+    >>> transform = tl.vision.transforms.ToTensor(data_format='HWC')
+    >>> image = transform(image)
+    >>> print(image)
+
+    """
+
+    def __init__(self, data_format='HWC'):
+
+        if data_format not in ('CHW', 'HWC'):
+            raise ValueError('data_format should be CHW or HWC. Got {}'.format(data_format))
+
+        self.data_format = data_format
+
+    def __call__(self, image):
+
+        return F.to_tensor(image, self.data_format)
+
+
+class CentralCrop(object):
+    """Crops the given image at the center. If size is given, the image will be cropped to size.
+    If central_fraction is given, the image will be cropped to (H * central_fraction, W * central_fraction).
+    Size has a higher priority.
+
+    Parameters
+    ----------
+    size : int or sequence of int
+        The output size of the cropped image.
+        If size is an integer, a square crop of size (size, size) is returned.
+        If size is a sequence of length 2, it should be (height, width).
+    central_fraction : float
+        float (0, 1], fraction of size to crop
+
+    Examples
+    ----------
+    With TensorLayer
+
+    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+    >>> transform = tl.vision.transforms.CentralCrop(size = (50, 50))
+    >>> image = transform(image)
+    >>> print(image)
+    >>> image shape : (50, 50, 3)
+
+    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+    >>> transform = tl.vision.transforms.CentralCrop(central_fraction=0.5)
+    >>> image = transform(image)
+    >>> print(image)
+    >>> image shape : (112, 112, 3)
+
+    """
+
+    def __init__(self, size=None, central_fraction=None):
+
+        self.central_fraction = central_fraction
+        self.size = size
+
+    def __call__(self, image):
+
+        return F.central_crop(image, self.size, self.central_fraction)
+
+
+class Compose(object):
+    """Composes several transforms together.
+
+    Parameters
+    ----------
+    transforms : list of 'transform' objects
+        list of transforms to compose.
+
+    Examples
+    ----------
+    With TensorLayer
+
+    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+    >>> transform = tl.vision.transforms.Compose([tl.vision.transforms.ToTensor(data_format='HWC'),tl.vision.transforms.CentralCrop(size = 100)])
+    >>> image = transform(image)
+    >>> print(image)
+    >>> image shape : (100, 100, 3)
+
+    """
+
+    def __init__(self, transforms):
+
+        self.transforms = transforms
+
+    def __call__(self, data):
+
+        for t in self.transforms:
+
+            data = t(data)
+
+        return data
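
Compose imposes no interface beyond `__call__`, so a user-defined transform mixes freely with the built-ins. A minimal sketch, assuming only NumPy; the `ClipToRange` name is hypothetical and not part of this file:

import numpy as np

class ClipToRange(object):
    """Hypothetical transform: clip pixel values into [low, high]."""

    def __init__(self, low=0.0, high=255.0):
        self.low = low
        self.high = high

    def __call__(self, image):
        # Same contract as the built-in transforms: image in, image out.
        return np.clip(image, self.low, self.high)

image = np.random.rand(224, 224, 3) * 300.0
assert ClipToRange()(image).max() <= 255.0
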
+class Crop(object):
+    """Crops an image to a specified bounding box.
+
+    Parameters
+    ----------
+    offset_height : int
+        Vertical coordinate of the top-left corner of the bounding box in image.
+    offset_width: int
+        Horizontal coordinate of the top-left corner of the bounding box in image.
+    target_height: int
+        Height of the bounding box.
+    target_width: int
+        Width of the bounding box.
+
+    Examples
+    ----------
+    With TensorLayer
+
+    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+    >>> transform = tl.vision.transforms.Crop(offset_height=10, offset_width=10, target_height=100, target_width=100)
+    >>> image = transform(image)
+    >>> print(image)
+    >>> image shape : (100, 100, 3)
+
+    """
+
+    def __init__(self, offset_height, offset_width, target_height, target_width):
+
+        self.offset_height = offset_height
+        self.offset_width = offset_width
+        self.target_height = target_height
+        self.target_width = target_width
+
+    def __call__(self, image):
+
+        return F.crop(image, self.offset_height, self.offset_width, self.target_height, self.target_width)
+
+
+class Pad(object):
+    """Pad the given image on all sides with the given "pad" value.
+
+    Parameters
+    ----------
+    padding : int or sequence
+        Padding on each border.
+        If a single int is provided, this is used to pad all borders.
+        If sequence of length 2 is provided, this is the padding on left/right and top/bottom respectively.
+        If a sequence of length 4 is provided, this is the padding for the left, top, right and bottom borders respectively.
+    padding_value : number or sequence
+        Pixel fill value for constant fill. Default is 0.
+        If a tuple of length 3, it is used to fill R, G, B channels respectively.
+        This value is only used when the mode is constant.
+    mode : str
+        Type of padding. Default is constant.
+
+    Examples
+    ----------
+    With TensorLayer
+
+    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+    >>> transform = tl.vision.transforms.Pad(padding=10, padding_value=0, mode='constant')
+    >>> image = transform(image)
+    >>> print(image)
+    >>> image shape : (244, 244, 3)
+
+    """
+
+    def __init__(self, padding, padding_value=0, mode='constant'):
+
+        self.padding = padding
+        self.padding_value = padding_value
+        self.mode = mode
+
+    def __call__(self, image):
+
+        return F.pad(image, self.padding, self.padding_value, self.mode)
+
+
+class Resize(object):
+    """Resize the input image to the given size.
+
+    Parameters
+    ----------
+    size : int or sequence
+        Desired output size.
+        If size is a sequence like (h, w), output size will be matched to this.
+        If size is an int, smaller edge of the image will be matched to this number.
+        i.e, if height > width, then image will be rescaled to (size * height / width, size).
+    interpolation : str
+        Interpolation method. Default: 'bilinear'.
+
+    Examples
+    ----------
+    With TensorLayer
+
+    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+    >>> transform = tl.vision.transforms.Resize(size = (100,100), interpolation='bilinear')
+    >>> image = transform(image)
+    >>> print(image)
+    >>> image shape : (100, 100, 3)
+
+    """
+
+    def __init__(self, size, interpolation='bilinear'):
+
+        self.size = size
+        self.interpolation = interpolation
+
+    def __call__(self, image):
+
+        return F.resize(image, self.size, self.interpolation)
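
When `size` is an int, Resize matches the smaller edge to `size` and scales the other edge to preserve aspect ratio. A quick sketch of that arithmetic; the helper name is illustrative only:

def smaller_edge_target(height, width, size):
    # Mirrors the docstring rule: if height > width, the output is
    # (size * height / width, size); otherwise the roles swap.
    if height > width:
        return int(round(size * height / width)), size
    return size, int(round(size * width / height))

assert smaller_edge_target(480, 640, 100) == (100, 133)  # wide image
assert smaller_edge_target(640, 480, 100) == (133, 100)  # tall image
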
+class Transpose(object):
+    """Transpose image(s) by swapping dimensions.
+
+    Parameters
+    ----------
+    order : sequence of int
+        Desired output dimension order.
+
+    Examples
+    ----------
+    With TensorLayer
+
+    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+    >>> transform = tl.vision.transforms.Transpose(order=(2, 0 ,1))
+    >>> image = transform(image)
+    >>> print(image)
+    >>> image shape : (3, 224, 224)
+
+    """
+
+    def __init__(self, order):
+
+        self.order = order
+
+    def __call__(self, image):
+
+        return F.transpose(image, self.order)
+
+
+class HWC2CHW(object):
+    """Transpose an image of shape (H, W, C) to shape (C, H, W).
+
+    Examples
+    ----------
+    With TensorLayer
+
+    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+    >>> transform = tl.vision.transforms.HWC2CHW()
+    >>> image = transform(image)
+    >>> print(image)
+    >>> image shape : (3, 224, 224)
+
+    """
+
+    def __call__(self, image):
+
+        return F.hwc_to_chw(image)
+
+
+class CHW2HWC(object):
+    """Transpose an image of shape (C, H, W) to shape (H, W, C).
+
+    Examples
+    ----------
+    With TensorLayer
+
+    >>> image = (np.random.rand(3, 224, 224) * 255.).astype(np.uint8)
+    >>> transform = tl.vision.transforms.CHW2HWC()
+    >>> image = transform(image)
+    >>> print(image)
+    >>> image shape : (224, 224, 3)
+
+    """
+
+    def __call__(self, image):
+
+        return F.chw_to_hwc(image)
+
+
+class RgbToHsv(object):
+    """Converts an image from RGB to HSV.
+
+    Examples
+    ----------
+    With TensorLayer
+
+    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+    >>> transform = tl.vision.transforms.RgbToHsv()
+    >>> image = transform(image)
+    >>> print(image)
+    >>> image shape : (224, 224, 3)
+
+    """
+
+    def __call__(self, image):
+
+        return F.rgb_to_hsv(image)
+
+
+class HsvToRgb(object):
+    """Converts an image from HSV to RGB.
+
+    Examples
+    ----------
+    With TensorLayer
+
+    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+    >>> transform = tl.vision.transforms.HsvToRgb()
+    >>> image = transform(image)
+    >>> print(image)
+    >>> image shape : (224, 224, 3)
+
+    """
+
+    def __call__(self, image):
+
+        return F.hsv_to_rgb(image)
+
+
+class RgbToGray(object):
+    """Converts an image from RGB to grayscale.
+
+    Parameters
+    ----------
+    num_output_channels: int
+        (1 or 3) number of channels desired for output image. Default is 1.
+
+    Examples
+    ----------
+    With TensorLayer
+
+    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+    >>> transform = tl.vision.transforms.RgbToGray(num_output_channels=1)
+    >>> image = transform(image)
+    >>> print(image)
+    >>> image shape : (224, 224, 1)
+
+    """
+
+    def __init__(self, num_output_channels=1):
+
+        self.num_output_channels = num_output_channels
+
+    def __call__(self, image):
+
+        return F.rgb_to_gray(image, self.num_output_channels)
+
+
+class AdjustBrightness(object):
+    """Adjust brightness of the image.
+
+    Parameters
+    ----------
+    brightness_factor: float
+        How much to adjust the brightness. Can be any non negative number. 1 gives the original image.
+        Default is 1.
+
+    Examples
+    ----------
+    With TensorLayer
+
+    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+    >>> transform = tl.vision.transforms.AdjustBrightness(brightness_factor=1)
+    >>> image = transform(image)
+    >>> print(image)
+
+    """
+
+    def __init__(self, brightness_factor=1):
+        self.brightness_factor = brightness_factor
+
+    def __call__(self, image):
+
+        return F.adjust_brightness(image, self.brightness_factor)
+
+
+class AdjustContrast(object):
+    """Adjust contrast of the image.
+
+    Parameters
+    ----------
+    contrast_factor: float
+        How much to adjust the contrast. Can be any non negative number. 1 gives the original image.
+        Default is 1.
+ + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.AdjustContrast(contrast_factor=1) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, contrast_factor=1): + + self.contrast_factor = contrast_factor + + def __call__(self, image): + + return F.adjust_contrast(image, self.contrast_factor) + + +class AdjustHue(object): + """Adjust hue of the image. + + Parameters + ---------- + hue_factor: float + How much to shift the hue channel. Should be in [-0.5, 0.5]. + 0.5 and -0.5 give complete reversal of hue channel in HSV space in positive and negative direction respectively. + 0 means no shift. Therefore, both -0.5 and 0.5 will give an image with complementary colors while 0 gives the original image. + Default is 0. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.AdjustHue(hue_factor=0) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, hue_factor=0): + + self.hue_factor = hue_factor + + def __call__(self, image): + + return F.adjust_hue(image, self.hue_factor) + + +class AdjustSaturation(object): + """Adjust saturation of the image. + + Parameters + ---------- + saturation_factor: float + How much to adjust the saturation. Can be any non negative number. 1 gives the original image. + Default is 1. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.AdjustSaturation(saturation_factor=1) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, saturation_factor=1): + + self.saturation_factor = saturation_factor + + def __call__(self, image): + + return F.adjust_saturation(image, self.saturation_factor) + + +class FlipHorizontal(object): + """Flip an image horizontally. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.FlipHorizontal() + >>> image = transform(image) + >>> print(image) + + """ + + def __call__(self, image): + + return F.hflip(image) + + +class FlipVertical(object): + """Flip an image vertically. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.FlipVertical() + >>> image = transform(image) + >>> print(image) + + """ + + def __call__(self, image): + + return F.vflip(image) + + +class PadToBoundingbox(object): + """Pad image with the specified height and width to target size. + + Parameters + ---------- + offset_height: int + Number of rows to add on top. + offset_width: int + Number of columns to add on the left. + target_height: int + Height of output image. + target_width: int + Width of output image. + padding_value: int or sequence + value to pad. 
+ + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand( 224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.PadToBoundingbox(offset_height=10, offset_width=10, target_height=300, target_width=300, padding_value=0) + >>> image = transform(image) + >>> print(image) + >>> image shape : (300, 300, 3) + """ + + def __init__(self, offset_height, offset_width, target_height, target_width, padding_value=0): + self.offset_height = offset_height + self.offset_width = offset_width + self.target_height = target_height + self.target_width = target_width + self.padding_value = padding_value + + def __call__(self, image): + + return F.padtoboundingbox( + image, self.offset_height, self.offset_width, self.target_height, self.target_width, self.padding_value + ) + + +class Normalize(object): + """Normalize a tensor image with mean and standard deviation. + + Parameters + ---------- + mean: number or sequence + If mean is a number, mean will be applied for all channels. Sequence of means for each channel. + std: number or sequnece + If std is a number, std will be applied for all channels.Sequence of standard deviations for each channel. + data_format: str + Data format of input image, should be 'HWC' or 'CHW'. Default: 'HWC'. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand( 224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.Normalize(mean = (155.0, 155.0, 155.0), std = (75.0, 75.0, 75.0),data_format='HWC') + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, mean, std, data_format='HWC'): + + self.mean = mean + self.std = std + self.data_format = data_format + + def __call__(self, image): + + return F.normalize(image, self.mean, self.std, self.data_format) + + +class StandardizePerImage(object): + """For each 3-D image x in image, computes (x - mean) / adjusted_stddev, where mean is the average of all values in x. + adjusted_stddev = max(stddev, 1.0/sqrt(N)) is capped away from 0 to protect against division by 0 when handling uniform images. + N is the number of elements in x. stddev is the standard deviation of all values in x + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand( 224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.StandardizePerImage() + >>> image = transform(image) + >>> print(image) + + """ + + def __call__(self, image): + + return F.standardize(image) + + +class RandomBrightness(object): + """Random adjust brightness of the image. + + Parameters + ---------- + brightness_factor: float or sequence + Brightness adjustment factor (default=(1, 1)). + If it is a float, the factor is uniformly chosen from the range [max(0, 1-brightness_factor), 1+brightness_factor]. + If it is a sequence, it should be [min, max] for the range.Should be non negative numbers. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.RandomBrightness(brightness_factor=(0.5, 2)) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, brightness_factor=(1, 1)): + self.brighthness_factor = brightness_factor + + def __call__(self, image): + + return F.random_brightness(image, self.brighthness_factor) + + +class RandomContrast(object): + """Random adjust contrast of the image. + + Parameters + ---------- + contrast_factor: float or sequence + Contrast adjustment factor (default=(1, 1)). 
+ If it is a float, the factor is uniformly chosen from the range [max(0, 1-contrast_factor), 1+contrast_factor]. + If it is a sequence, it should be [min, max] for the range.Should be non negative numbers. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.RandomContrast(contrast_factor=(0.5, 2)) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, contrast_factor=(1, 1)): + + self.contrast_factor = contrast_factor + + def __call__(self, image): + + return F.random_contrast(image, self.contrast_factor) + + +class RandomSaturation(object): + """Random adjust saturation of the image. + + Parameters + ---------- + saturation_factor: float or sequence + Saturation adjustment factor (default=(1, 1)). + If it is a float, the factor is uniformly chosen from the range [max(0, 1-saturation_factor), 1+saturation_factor]. + If it is a sequence, it should be [min, max] for the range.Should be non negative numbers. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.RandomSaturation(saturation_factor=(0.5, 2)) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, saturation_factor=(1, 1)): + + self.saturation_factor = saturation_factor + + def __call__(self, image): + + return F.random_saturation(image, self.saturation_factor) + + +class RandomHue(object): + """Random adjust hue of the image. + + Parameters + ---------- + hue_factor: float or sequence + Hue adjustment factor (default=(0, 0)). + If it is a float, the factor is uniformly chosen from the range [-hue_factor, hue_factor]. + If it is a sequence, it should be [min, max] for the range.Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.RandomHue(hue_factor=(-0.5, 0.5)) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, hue_factor=(0, 0)): + + self.hue_factor = hue_factor + + def __call__(self, image): + + return F.random_hue(image, self.hue_factor) + + +class RandomCrop(object): + """Crop the given image at a random location. + + Parameters + ---------- + size: int or sequence + Desired output size of the crop. + If size is an int instead of sequence like (h, w), a square crop (size, size) is made. + If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). + padding: int or sequence, optional + Optional padding on each border of the image. + If a single int is provided this is used to pad all borders. + If sequence of length 2 is provided this is the padding on left/right and top/bottom respectively. + If a sequence of length 4 is provided, it is used to pad left, top, right, bottom borders respectively. + Default: 0. + pad_if_needed: boolean + It will pad the image if smaller than the desired size to avoid raising an exception. + Since cropping is done after padding, the padding seems to be done at a random offset. + fill: number or sequence + Pixel fill value for constant fill. Default is 0. + If a tuple of length 3, it is used to fill R, G, B channels respectively. + padding_mode: str + Type of padding. Default is constant. 
+ + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.RandomCrop(size=50, padding=10, pad_if_needed=False, fill=0, padding_mode='constant') + >>> image = transform(image) + >>> print(image) + >>> image shape : (70,70,3) + + """ + + def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant'): + + self.size = size + self.padding = padding + self.pad_if_needed = pad_if_needed + self.fill = fill + self.padding_mode = padding_mode + + def __call__(self, image): + + return F.random_crop( + image, + size=self.size, + padding=self.padding, + pad_if_needed=self.pad_if_needed, + fill=self.fill, + padding_mode=self.padding_mode, + ) + + +class RandomResizedCrop(object): + """Crop the given image to random size and aspect ratio. + + Parameters + ---------- + size: int or sequence + Desired output size of the crop. + If size is an int instead of sequence like (h, w), a square crop (size, size) is made. + If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). + scale: tuple of float + scale range of the cropped image before resizing, relatively to the origin image. + ratio: tuple of float + aspect ratio range of the cropped image before resizing. + interpolation: str + Type of interpolation. Default is bilinear. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.RandomResizedCrop(size = (100, 100), scale = (0.08, 1.0), ratio = (3./4.,4./3.), interpolation = 'bilinear') + >>> image = transform(image) + >>> print(image) + >>> image shape : (100,100,3) + + """ + + def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation='bilinear'): + self.size = size + self.scale = scale + self.ratio = ratio + self.interpolation = interpolation + + def __call__(self, image): + + return F.random_resized_crop(image, self.size, self.scale, self.ratio, self.interpolation) + + +class RandomFlipVertical(object): + """Vertically flip the given image randomly with a given probability. + + Parameters + ---------- + prob: float + probability of the image being flipped. Default value is 0.5 + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.RandomFlipVertical(prob = 0.5) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, prob=0.5): + + self.prob = prob + + def __call__(self, image): + + return F.random_vflip(image, self.prob) + + +class RandomFlipHorizontal(object): + """Horizontally flip the given image randomly with a given probability. + + Parameters + ---------- + prob: float + probability of the image being flipped. Default value is 0.5 + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.RandomFlipHorizontal(prob = 0.5) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, prob=0.5): + + self.prob = prob + + def __call__(self, image): + + return F.random_hflip(image, self.prob) + + +class RandomRotation(object): + """Rotate the image by random angle. + + Parameters + ---------- + degrees: number or sequnence + Range of degrees to select from. + If degrees is a number, the range of degrees will be (-degrees, +degrees). 
+ If degrees is a sequence, the range of degrees will (degrees[0], degrees[1]). + interpolation: str + Interpolation method. Default is 'bilinear'. + expand: boolean + If true, expands the output to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the input image. + Note that the expand flag assumes rotation around the center and no translation. + center: sequence or None + Optional center of rotation, (x, y). Origin is the upper left corner. + Default is the center of the image. + fill: number or sequence + Pixel fill value for the area outside the rotated image. Default is 0. + + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.RandomRotation(degrees=30, interpolation='bilinear', expand=False, center=None, fill=0) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, degrees, interpolation='bilinear', expand=False, center=None, fill=0): + + self.degrees = degrees + self.interpolation = interpolation + self.expand = expand + self.center = center + self.fill = fill + + def __call__(self, image): + + return F.random_rotation(image, self.degrees, self.interpolation, self.expand, self.center, self.fill) + + +class RandomShear(object): + """Shear the image by random angle. + + Parameters + ---------- + degrees: number or sequnence + Range of degrees to select from. + If degrees is a number, a shear parallel to the x axis in the range (-shear, +shear) will be applied. + If shear is a sequence of 2 values a shear parallel to the x axis in the range (shear[0], shear[1]) will be applied. + If shear is a sequence of 4 values, a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied. + interpolation: str + Interpolation method. Default is 'bilinear'. + fill: number or sequence + Pixel fill value for the area outside the sheared image. Default is 0. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.RandomShear(degrees=30, interpolation='bilinear', fill=0) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, degrees, interpolation='bilinear', fill=0): + + self.degrees = degrees + self.interpolation = interpolation + self.fill = fill + + def __call__(self, image): + + return F.random_shear(image, self.degrees, self.interpolation, self.fill) + + +class RandomShift(object): + """Shift the image by random translations. + + Parameters + ---------- + shift: list or tuple + Maximum absolute fraction for horizontal and vertical translations. + shift=(a, b), then horizontal shift is randomly sampled in the range -img_width * a < dx < img_width * a. + vertical shift is randomly sampled in the range -img_height * b < dy < img_height * b. + interpolation: str + Interpolation method. Default is 'bilinear'. + fill: number or sequence + Pixel fill value for the area outside the sheared image. Default is 0. 
+ + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.RandomShift(shift=(0.2, 0.2), interpolation='bilinear', fill=0) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, shift, interpolation='bilinear', fill=0): + + self.shift = shift + self.interpolation = interpolation + self.fill = fill + + def __call__(self, image): + + return F.random_shift(image, self.shift, self.interpolation, self.fill) + + +class RandomZoom(object): + """Zoom the image by random scale. + + Parameters + ---------- + zoom: list or tuple + Scaling factor interval, e.g (a, b), then scale is randomly sampled from the range a <= scale <= b. + interpolation: str + Interpolation method. Default is 'bilinear'. + fill: number or sequence + Pixel fill value for the area outside the sheared image. Default is 0. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.RandomZoom(zoom=(0.2, 0.5), interpolation='bilinear', fill=0) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, zoom, interpolation='bilinear', fill=0): + + self.zoom = zoom + self.interpolation = interpolation + self.fill = fill + + def __call__(self, image): + + return F.random_zoom(image, self.zoom, self.interpolation, self.fill) + + +class RandomAffine(object): + """Random affine transformation of the image keeping center invariant. + + Parameters + ---------- + degrees: number or sequnence + Range of degrees to select from. + If degrees is a number, the range of degrees will be (-degrees, +degrees). + If degrees is a sequence, the range of degrees will (degrees[0], degrees[1]). + Set to 0 to deactivate rotations. + shift: sequence or None + Maximum absolute fraction for horizontal and vertical translations. + shift=(a, b), then horizontal shift is randomly sampled in the range -img_width * a < dx < img_width * a. + vertical shift is randomly sampled in the range -img_height * b < dy < img_height * b. + Will not shift by default. + shear: number or sequnence or None + Range of degrees to select from. + If degrees is a number, a shear parallel to the x axis in the range (-shear, +shear) will be applied. + If shear is a sequence of 2 values a shear parallel to the x axis in the range (shear[0], shear[1]) will be applied. + If shear is a sequence of 4 values, a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied. + Will not apply shear by default. + zoom: sequence or None + Scaling factor interval, e.g (a, b), then scale is randomly sampled from the range a <= scale <= b. + Will not zoom by default. + interpolation: str + Interpolation method. Default is 'bilinear'. + fill: number or sequence + Pixel fill value for the area outside the sheared image. Default is 0. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.RandomAffine(degrees=30, shift=(0.2,0.2), zoom=(0.2, 0.5), shear=30, interpolation='bilinear', fill=0) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, degrees, shift=None, zoom=None, shear=None, interpolation='bilinear', fill=0): + + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number, it must be positive.' 
'But got {}.'.format(degrees))
+            degrees = [-degrees, degrees]
+        elif not (isinstance(degrees, (list, tuple)) and len(degrees) == 2):
+            raise TypeError('If degrees is a list or tuple, it should be of length 2. But got {}'.format(degrees))
+
+        self.degrees = tuple(float(x) for x in degrees)
+
+        if shift is not None:
+            if not (isinstance(shift, (list, tuple)) and len(shift) == 2):
+                raise TypeError("shift should be a list or tuple of length 2. But got {}.".format(shift))
+
+            for s in shift:
+                if not (0.0 <= s <= 1.0):
+                    raise ValueError('shift values should be between 0 and 1. But got {}.'.format(shift))
+        self.shift = shift
+
+        if zoom is not None:
+            if not (isinstance(zoom, (list, tuple)) and len(zoom) == 2):
+                raise TypeError("zoom should be a list or tuple of length 2. But got {}.".format(zoom))
+
+            if not (0 <= zoom[0] <= zoom[1]):
+                raise ValueError("zoom values should be non negative, and zoom[1] should not be less than zoom[0].")
+
+        self.zoom = zoom
+
+        if shear is not None:
+            if isinstance(shear, numbers.Number):
+                if shear < 0:
+                    raise ValueError("If shear is a single number, it must be positive.")
+                shear = [-shear, shear]
+            elif not (isinstance(shear, (list, tuple)) and len(shear) in (2, 4)):
+                raise TypeError('shear should be a list or tuple of length 2 or 4.')
+            shear = tuple(float(x) for x in shear)
+        self.shear = shear
+
+        self.interpolation = interpolation
+
+        if fill is None:
+            fill = 0
+        elif not isinstance(fill, (list, tuple, numbers.Number)):
+            raise TypeError("Fill should be either a sequence or a number.")
+
+        self.fill = fill
+
+    def __call__(self, image):
+
+        return F.random_affine(image, self.degrees, self.shift, self.zoom, self.shear, self.interpolation, self.fill)
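
Internally, `F.random_affine` builds one 3x3 matrix for each sampled rotation, shift, shear and zoom, multiplies them, and warps the image once, so interpolation error is not compounded across steps. A compact sketch of that composition; the function name is assumed for illustration:

import numpy as np

def compose_affine(theta_deg, tx, ty, zoom):
    theta = np.deg2rad(theta_deg)
    rotation = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                         [np.sin(theta), np.cos(theta), 0.0],
                         [0.0, 0.0, 1.0]])
    shift = np.array([[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]])
    scale = np.array([[zoom, 0.0, 0.0], [0.0, zoom, 0.0], [0.0, 0.0, 1.0]])
    # One combined matrix; a single interpolation pass avoids compounding blur.
    return rotation @ shift @ scale

matrix = compose_affine(30.0, 5.0, -3.0, 0.5)
assert matrix.shape == (3, 3)
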
+
+class ColorJitter(object):
+    """Randomly change the brightness, contrast, saturation and hue of an image.
+
+    Parameters
+    ----------
+    brightness: float or sequence
+        Brightness adjustment factor (default=(1, 1)).
+        If it is a float, the factor is uniformly chosen from the range [max(0, 1-brightness_factor), 1+brightness_factor].
+        If it is a sequence, it should be [min, max] for the range. Should be non negative numbers.
+    contrast: float or sequence
+        Contrast adjustment factor (default=(1, 1)).
+        If it is a float, the factor is uniformly chosen from the range [max(0, 1-contrast_factor), 1+contrast_factor].
+        If it is a sequence, it should be [min, max] for the range. Should be non negative numbers.
+    saturation: float or sequence
+        Saturation adjustment factor (default=(1, 1)).
+        If it is a float, the factor is uniformly chosen from the range [max(0, 1-saturation_factor), 1+saturation_factor].
+        If it is a sequence, it should be [min, max] for the range. Should be non negative numbers.
+    hue: float or sequence
+        Hue adjustment factor (default=(0, 0)).
+        If it is a float, the factor is uniformly chosen from the range [-hue_factor, hue_factor].
+        If it is a sequence, it should be [min, max] for the range. Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
+
+    Examples
+    ----------
+    With TensorLayer
+
+    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+    >>> transform = tl.vision.transforms.ColorJitter(brightness=(1,5), contrast=(1,5), saturation=(1,5), hue=(-0.2,0.2))
+    >>> image = transform(image)
+    >>> print(image)
+
+    """
+
+    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
+
+        self.brightness = self._check_input(brightness, 'brightness')
+        self.contrast = self._check_input(contrast, 'contrast')
+        self.saturation = self._check_input(saturation, 'saturation')
+        self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5), clip_first_on_zero=False)
+
+    def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
+        if isinstance(value, numbers.Number):
+            if value < 0:
+                raise ValueError("If {} is a single number, it must be non negative.".format(name))
+            value = [center - float(value), center + float(value)]
+            if clip_first_on_zero:
+                value[0] = max(value[0], 0.0)
+        elif isinstance(value, (tuple, list)) and len(value) == 2:
+            if not bound[0] <= value[0] <= value[1] <= bound[1]:
+                raise ValueError("{} values should be between {}".format(name, bound))
+        else:
+            raise TypeError("{} should be a single number or a list/tuple with length 2.".format(name))
+
+        if value[0] == value[1] == center:
+            value = None
+        return value
+
+    @staticmethod
+    def get_params(brightness, contrast, saturation, hue):
+        fn_idx = np.random.permutation(np.arange(4))
+
+        b = None if brightness is None else float(np.random.uniform(brightness[0], brightness[1]))
+        c = None if contrast is None else float(np.random.uniform(contrast[0], contrast[1]))
+        s = None if saturation is None else float(np.random.uniform(saturation[0], saturation[1]))
+        h = None if hue is None else float(np.random.uniform(hue[0], hue[1]))
+
+        return fn_idx, b, c, s, h
+
+    def __call__(self, image):
+
+        fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = \
+            self.get_params(self.brightness, self.contrast, self.saturation, self.hue)
+
+        for fn_id in fn_idx:
+            if fn_id == 0 and brightness_factor is not None:
+                image = F.adjust_brightness(image, brightness_factor)
+            elif fn_id == 1 and contrast_factor is not None:
+                image = F.adjust_contrast(image, contrast_factor)
+            elif fn_id == 2 and saturation_factor is not None:
+                image = F.adjust_saturation(image, saturation_factor)
+            elif fn_id == 3 and hue_factor is not None:
+                image = F.adjust_hue(image, hue_factor)
+
+        return image

From 0be83f9c0ae509a94a3e9f59ede5f16383182190 Mon Sep 17 00:00:00 2001
From: Eric Lai
Date: Tue, 22 Jun 2021 15:19:43 +0800
Subject: [PATCH 17/36] rename cross entropy

---
 .../tutorial_SequentialLayer.py                    |  2 +-
 ...torial_automatic_inference_input _shape.py      | 12 +++----
 .../tutorial_cifar10_cnn_mindspore_backend.py      |  3 +-
 ...tutorial_cifar10_cnn_tensorflow_backend.py      |  6 ++--
 ...tutorial_mnist_mlp_paddlepaddle_backend.py      |  2 +-
 .../tutorial_mnist_mlp_tensorflow_backend.py       | 12 +++----
 examples/basic_tutorials/tutorial_mnist_simple.py  |  3 +-
 .../tutorial_nested_usage_of_Layer.py              |  7 ++--
 tensorlayer/cost/mindspore_cost.py                 | 32 +++++++++++++++++--
 tensorlayer/cost/paddle_cost.py                    |  6 ++--
 tensorlayer/cost/tensorflow_cost.py                |  6 ++--
 tensorlayer/layers/deprecated.py                   |  4 +++
 12 files changed, 63 insertions(+), 32 deletions(-)

diff --git a/examples/basic_tutorials/tutorial_SequentialLayer.py b/examples/basic_tutorials/tutorial_SequentialLayer.py
index dd5e97249..729d5b1c5 100644
--- a/examples/basic_tutorials/tutorial_SequentialLayer.py
+++ 
b/examples/basic_tutorials/tutorial_SequentialLayer.py @@ -37,7 +37,7 @@ def generator_train(): train_ds = tl.dataflow.Batch(train_ds,batch_size) -model = tl.models.Model(network=MLP, loss_fn=tl.cost.cross_entropy, optimizer=optimizer) +model = tl.models.Model(network=MLP, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer) model.train(n_epoch=n_epoch, train_dataset=train_ds, print_freq=print_freq, print_train_batch=False) model.save_weights('./model.npz', format='npz_dict') model.load_weights('./model.npz', format='npz_dict') \ No newline at end of file diff --git a/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py b/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py index 771fb18ee..6cc8fbb01 100644 --- a/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py +++ b/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py @@ -56,7 +56,7 @@ def forward(self, x, foo=None): ## compute outputs _logits = MLP(X_batch) ## compute loss and update model - _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') + _loss = tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='train_loss') grad = tape.gradient(_loss, train_weights) optimizer.apply_gradients(zip(grad, train_weights)) @@ -66,7 +66,7 @@ def forward(self, x, foo=None): train_loss, train_acc, n_iter = 0, 0, 0 for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False): _logits = MLP(X_batch) - train_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') + train_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='eval_loss') train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 print(" train loss: {}".format(train_loss / n_iter)) @@ -75,7 +75,7 @@ def forward(self, x, foo=None): val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False): _logits = MLP(X_batch) # is_train=False, disable dropout - val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') + val_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='eval_loss') val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 print(" val loss: {}".format(val_loss / n_iter)) @@ -86,8 +86,8 @@ def forward(self, x, foo=None): test_loss, test_acc, n_iter = 0, 0, 0 for X_batch, y_batch in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False): _logits = MLP(X_batch, foo=1) - test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') + test_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='test_loss') test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 -print(" test foo=1 loss: {}".format(val_loss / n_iter)) -print(" test foo=1 acc: {}".format(val_acc / n_iter)) +print(" test foo=1 loss: {}".format(test_loss / n_iter)) +print(" test foo=1 acc: {}".format(test_acc / n_iter)) diff --git a/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py b/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py index e765f3aa5..37a45349e 100644 --- a/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py +++ b/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py @@ -1,8 +1,9 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- + import os -# os.environ['TL_BACKEND'] = 'tensorflow' os.environ['TL_BACKEND'] = 'mindspore' + import time import numpy as np import multiprocessing diff --git 
a/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py b/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py index cbd47e2d2..edfa9f8a4 100644 --- a/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py +++ b/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py @@ -149,7 +149,7 @@ def _map_fn_test(img, target): # compute outputs _logits = net(X_batch) # compute loss and update model - _loss_ce = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') + _loss_ce = tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='train_loss') grad = tape.gradient(_loss_ce, train_weights) optimizer.apply_gradients(zip(grad, train_weights)) @@ -169,7 +169,7 @@ def _map_fn_test(img, target): val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_ds: _logits = net(X_batch) # is_train=False, disable dropout - val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') + val_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='eval_loss') val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 print(" val loss: {}".format(val_loss / n_iter)) @@ -180,7 +180,7 @@ def _map_fn_test(img, target): test_loss, test_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_ds: _logits = net(X_batch) - test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') + test_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='test_loss') test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 print(" test loss: {}".format(test_loss / n_iter)) diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py b/examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py index 224b40c56..2ab00ba13 100644 --- a/examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py +++ b/examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py @@ -39,7 +39,7 @@ def forward(self, x): optimizer = tl.optimizers.Adam(learning_rate=0.001) metric = tl.metric.Accuracy() -model = tl.models.Model(network=net, loss_fn=tl.cost.cross_entropy, optimizer=optimizer, metrics=metric) +model = tl.models.Model(network=net, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer, metrics=metric) model.train(n_epoch=2, train_dataset=train_loader, print_freq=5, print_train_batch=True) model.save_weights('./model_mlp.npz', format='npz_dict') model.load_weights('./model_mlp.npz', format='npz_dict') diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py b/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py index e3524e161..d959eea36 100644 --- a/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py +++ b/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py @@ -54,7 +54,7 @@ def forward(self, x, foo=None): ## compute outputs _logits = MLP(X_batch) ## compute loss and update model - _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') + _loss = tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='train_loss') grad = tape.gradient(_loss, train_weights) optimizer.apply_gradients(zip(grad, train_weights)) @@ -64,7 +64,7 @@ def forward(self, x, foo=None): train_loss, train_acc, n_iter = 0, 0, 0 for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False): _logits = MLP(X_batch) - train_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') + train_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, 
y_batch, name='eval_loss') train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 print(" train loss: {}".format(train_loss / n_iter)) @@ -73,7 +73,7 @@ def forward(self, x, foo=None): val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False): _logits = MLP(X_batch) # is_train=False, disable dropout - val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') + val_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='eval_loss') val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 print(" val loss: {}".format(val_loss / n_iter)) @@ -84,8 +84,8 @@ def forward(self, x, foo=None): test_loss, test_acc, n_iter = 0, 0, 0 for X_batch, y_batch in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False): _logits = MLP(X_batch, foo=1) - test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') + test_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='test_loss') test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 -print(" test foo=1 loss: {}".format(val_loss / n_iter)) -print(" test foo=1 acc: {}".format(val_acc / n_iter)) +print(" test foo=1 loss: {}".format(test_loss / n_iter)) +print(" test foo=1 acc: {}".format(test_acc / n_iter)) diff --git a/examples/basic_tutorials/tutorial_mnist_simple.py b/examples/basic_tutorials/tutorial_mnist_simple.py index 0506bd005..e6d4baa9e 100644 --- a/examples/basic_tutorials/tutorial_mnist_simple.py +++ b/examples/basic_tutorials/tutorial_mnist_simple.py @@ -28,7 +28,6 @@ def __init__(self): def forward(self, x, foo=None): z = self.dropout1(x) z = self.dense1(z) - # z = self.bn(z) z = self.dropout2(z) z = self.dense2(z) z = self.dropout3(z) @@ -63,7 +62,7 @@ def generator_train(): train_ds = tl.dataflow.Batch(train_ds,batch_size) -model = tl.models.Model(network=MLP, loss_fn=tl.cost.cross_entropy, optimizer=optimizer) +model = tl.models.Model(network=MLP, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer) model.train(n_epoch=n_epoch, train_dataset=train_ds, print_freq=print_freq, print_train_batch=False) model.save_weights('./model.npz', format='npz_dict') model.load_weights('./model.npz', format='npz_dict') diff --git a/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py b/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py index 36b51dba4..70eca198c 100644 --- a/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py +++ b/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py @@ -83,6 +83,7 @@ def make_layer(self, in_channel): # get the network net = CNN() +print(net) # training settings batch_size = 128 n_epoch = 500 @@ -173,7 +174,7 @@ def _map_fn_test(img, target): # compute outputs _logits = net(X_batch) # compute loss and update model - _loss_ce = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') + _loss_ce = tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='train_loss') grad = tape.gradient(_loss_ce, train_weights) optimizer.apply_gradients(zip(grad, train_weights)) @@ -193,7 +194,7 @@ def _map_fn_test(img, target): val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_ds: _logits = net(X_batch) # is_train=False, disable dropout - val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') + val_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='eval_loss') val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 print(" val 
loss: {}".format(val_loss / n_iter)) @@ -204,7 +205,7 @@ def _map_fn_test(img, target): test_loss, test_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_ds: _logits = net(X_batch) - test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') + test_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='test_loss') test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 print(" test loss: {}".format(test_loss / n_iter)) diff --git a/tensorlayer/cost/mindspore_cost.py b/tensorlayer/cost/mindspore_cost.py index 694c5fc83..44df03bef 100644 --- a/tensorlayer/cost/mindspore_cost.py +++ b/tensorlayer/cost/mindspore_cost.py @@ -6,7 +6,7 @@ import mindspore.ops as P __all__ = [ - 'cross_entropy', + 'softmax_cross_entropy_with_logits', 'sigmoid_cross_entropy', 'binary_cross_entropy', 'mean_squared_error', @@ -25,7 +25,33 @@ 'maxnorm_i_regularizer', ] -cross_entropy = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') +def softmax_cross_entropy_with_logits(output, target, name=None): + """Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions, + it implements softmax internally. See ``tf.ops.sparse_softmax_cross_entropy_with_logits``. + + Parameters + ---------- + output : Tensor + A batch of distribution with shape: [batch_size, num of classes]. + target : Tensor + A batch of index with shape: [batch_size, ]. + name : string + Name of this loss. + + Examples + -------- + >>> import tensorlayer as tl + >>> ce = tl.cost.softmax_cross_entropy_with_logits(y_logits, y_target_logits, 'my_loss') + + References + ----------- + - About cross-entropy: ``__. + - The code is borrowed from: ``__. + + """ + + outputs = nn.SoftmaxCrossEntropyWithLogits(sparse=True)(output, target) + return outputs def sigmoid_cross_entropy(output, target, name=None): @@ -41,7 +67,7 @@ def sigmoid_cross_entropy(output, target, name=None): Name of this loss. """ - outputs = P.ReduceMean(cross_entropy(output, target)) + outputs = P.ReduceMean(P.SigmoidCrossEntropyWithLogits()(output, target)) return outputs diff --git a/tensorlayer/cost/paddle_cost.py b/tensorlayer/cost/paddle_cost.py index 8bebe6859..3464b8cf1 100644 --- a/tensorlayer/cost/paddle_cost.py +++ b/tensorlayer/cost/paddle_cost.py @@ -5,7 +5,7 @@ import paddle as pd __all__ = [ - 'cross_entropy', + 'softmax_cross_entropy_with_logits', 'sigmoid_cross_entropy', 'binary_cross_entropy', 'mean_squared_error', @@ -25,7 +25,7 @@ ] -def cross_entropy(output, target): +def softmax_cross_entropy_with_logits(output, target): """Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions, it implements softmax internally. See ``tf.ops.sparse_softmax_cross_entropy_with_logits``.
@@ -41,7 +41,7 @@ def cross_entropy(output, target): Examples -------- >>> import tensorlayer as tl - >>> ce = tl.cost.cross_entropy(y_logits, y_target_logits) + >>> ce = tl.cost.softmax_cross_entropy_with_logits(y_logits, y_target_logits) References ----------- diff --git a/tensorlayer/cost/tensorflow_cost.py b/tensorlayer/cost/tensorflow_cost.py index d819aa10f..1cab86baf 100644 --- a/tensorlayer/cost/tensorflow_cost.py +++ b/tensorlayer/cost/tensorflow_cost.py @@ -10,7 +10,7 @@ from tensorlayer import logging __all__ = [ - 'cross_entropy', + 'softmax_cross_entropy_with_logits', 'sigmoid_cross_entropy', 'binary_cross_entropy', 'mean_squared_error', @@ -30,7 +30,7 @@ ] -def cross_entropy(output, target, name=None): +def softmax_cross_entropy_with_logits(output, target, name=None): """Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions, it implements softmax internally. See ``tf.ops.sparse_softmax_cross_entropy_with_logits``. @@ -46,7 +46,7 @@ def cross_entropy(output, target, name=None): Examples -------- >>> import tensorlayer as tl - >>> ce = tl.cost.cross_entropy(y_logits, y_target_logits, 'my_loss') + >>> ce = tl.cost.softmax_cross_entropy_with_logits(y_logits, y_target_logits, 'my_loss') References ----------- diff --git a/tensorlayer/layers/deprecated.py b/tensorlayer/layers/deprecated.py index bbd8d4dd0..934e027a1 100644 --- a/tensorlayer/layers/deprecated.py +++ b/tensorlayer/layers/deprecated.py @@ -435,3 +435,7 @@ def ModelLayer(*args, **kwargs): def Seq2seqLuongAttention(*args, **kwargs): raise NonExistingLayerError("Seq2seqLuongAttention is removed for TensorLayer 3.0.") + +__all__ += ['cross_entropy'] +def cross_entropy(*args, **kwargs): + raise NonExistingLayerError("cross_entropy(output, target) --> softmax_cross_entropy_with_logits(output, target)" + __log__) \ No newline at end of file From 5437bdf072c5c64c0a8bc21d10b5ed424a56d331 Mon Sep 17 00:00:00 2001 From: Eric Lai Date: Fri, 25 Jun 2021 10:36:53 +0800 Subject: [PATCH 18/36] yapf --- docs/user/contributing.rst | 6 +++ tensorlayer/cost/mindspore_cost.py | 1 + tensorlayer/layers/core/core_mindspore.py | 63 +++++++++++++++++++++++ tensorlayer/layers/deprecated.py | 7 ++- tensorlayer/layers/normalization.py | 1 + tensorlayer/models/__init__.py | 2 +- 6 files changed, 78 insertions(+), 2 deletions(-) diff --git a/docs/user/contributing.rst b/docs/user/contributing.rst index 9b1d98f88..43576176a 100644 --- a/docs/user/contributing.rst +++ b/docs/user/contributing.rst @@ -25,6 +25,12 @@ Project Maintainers The TensorLayer project was started by `Hao Dong `_ at Imperial College London in June 2016. +For TensorLayer 3.x, it is now being actively developed and maintained by the following people *(in alphabetical order)*: + +- **Cheng Lai** (`@Laicheng0830 `_) - ``_ +- **Hao Dong** (`@zsdonghao `_) - ``_ +- **Jiarong Han** (`@hanjr92 `_) - ``_ + For TensorLayer 2.x, it is now actively developing and maintaining by the following people who has more than 50 contributions: - **Hao Dong** (`@zsdonghao `_) - ``_ diff --git a/tensorlayer/cost/mindspore_cost.py b/tensorlayer/cost/mindspore_cost.py index 44df03bef..1f2e5ba7b 100644 --- a/tensorlayer/cost/mindspore_cost.py +++ b/tensorlayer/cost/mindspore_cost.py @@ -25,6 +25,7 @@ 'maxnorm_i_regularizer', ] + def softmax_cross_entropy_with_logits(output, target, name=None): """Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions, it implements softmax internally.
See ``tf.ops.sparse_softmax_cross_entropy_with_logits``. diff --git a/tensorlayer/layers/core/core_mindspore.py b/tensorlayer/layers/core/core_mindspore.py index 50872aa36..a19a49eaf 100644 --- a/tensorlayer/layers/core/core_mindspore.py +++ b/tensorlayer/layers/core/core_mindspore.py @@ -6,6 +6,14 @@ import tensorlayer as tl from collections import OrderedDict +from mindspore import log as logger +import inspect +from mindspore import context +import numpy +import mindspore as ms +from mindspore.common.api import _pynative_exec +from mindspore.common.parameter import Parameter + __all__ = ['Module', 'SequentialLayer'] _global_layer_name_dict = {} # TODO: better implementation? @@ -119,6 +127,61 @@ def _compute_shape(tensors): shape_mem = tl.get_tensor_shape(tensors) return shape_mem + def __call__(self, *inputs, **kwargs): + if self.__class__.construct is Cell.construct: + logger.warning( + f"The '{self.__class__}' does not override the method 'construct', " + f"will call the super class(Cell) 'construct'." + ) + if kwargs: + bound_args = inspect.signature(self.construct).bind(*inputs, **kwargs) + inputs = bound_args.args + kwargs = bound_args.kwargs + + if context.get_context("mode") == context.GRAPH_MODE: + raise NotImplementedError( + "GRAPH MODE is not supported, please select PYNATIVE MODE." + ) + + # if context.get_context("mode") == context.GRAPH_MODE: + # if kwargs: + # raise ValueError("For 'graph' mode, the outermost network does not support passing " + # "variable key-value pair parameters.") + # if self.enable_hook: + # raise ValueError("The graph mode does not support hook function.") + # out = self.compile_and_run(*inputs) + # return out + + self.do_parameter_broadcast() + for item in inputs: + if isinstance(item, numpy.ndarray): + raise TypeError("cell inputs should not be numpy array.") + origin_grad = [] + if self.requires_grad is True: + _pynative_exec.set_grad_flag(True) + _pynative_exec.new_graph(self, *inputs, **kwargs) + for cell in self.cells(): + origin_grad.append(cell.requires_grad) + cell.set_grad(True) + else: + _pynative_exec.set_grad_flag(False) + cast_inputs = list() + if hasattr(self, "_mindspore_flags"): + if self._mindspore_flags.get('fp16'): + cast_inputs = self._cast_mixed_precision_inputs(inputs, ms.float16) + if self._mindspore_flags.get('fp32'): + cast_inputs = self._cast_mixed_precision_inputs(inputs, ms.float32) + if not cast_inputs: + cast_inputs = inputs + output = self.run_construct(cast_inputs, kwargs) + if isinstance(output, Parameter): + output = output.data + if self.requires_grad is True: + _pynative_exec.end_graph(self, output, *inputs, **kwargs) + for i, cell in enumerate(self.cells()): + cell.set_grad(origin_grad[i]) + return output + def _add_node(self, input_tensors, output_tensors): """Add a LayerNode for this layer given input_tensors, output_tensors.
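For reference, the renamed loss keeps the old calling convention: logits first, sparse integer class indices second, with an optional name argument under the TensorFlow backend. A minimal migration sketch, assuming the TensorFlow backend (the tensors below are illustrative placeholders, not part of these patches):

    import os
    os.environ['TL_BACKEND'] = 'tensorflow'  # select the backend before importing tensorlayer
    import tensorflow as tf
    import tensorlayer as tl

    logits = tf.random.normal([4, 10])  # [batch_size, num_classes] unscaled network outputs
    labels = tf.constant([1, 3, 5, 7])  # [batch_size] sparse class indices

    # TL 2.x spelling, removed by these patches (the shim in layers/deprecated.py raises NonExistingLayerError):
    # loss = tl.cost.cross_entropy(logits, labels, name='train_loss')
    # TL 3.x spelling, same signature:
    loss = tl.cost.softmax_cross_entropy_with_logits(logits, labels, name='train_loss')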
diff --git a/tensorlayer/layers/deprecated.py b/tensorlayer/layers/deprecated.py index 934e027a1..6b1cfe8ca 100644 --- a/tensorlayer/layers/deprecated.py +++ b/tensorlayer/layers/deprecated.py @@ -436,6 +436,11 @@ def ModelLayer(*args, **kwargs): def Seq2seqLuongAttention(*args, **kwargs): raise NonExistingLayerError("Seq2seqLuongAttention is removed for TensorLayer 3.0.") + __all__ += ['cross_entropy'] + + def cross_entropy(*args, **kwargs): - raise NonExistingLayerError("cross_entropy(output, target) --> softmax_cross_entropy_with_logits(output, target)" + __log__) \ No newline at end of file + raise NonExistingLayerError( + "cross_entropy(output, target) --> softmax_cross_entropy_with_logits(output, target)" + __log__ + ) diff --git a/tensorlayer/layers/normalization.py b/tensorlayer/layers/normalization.py index c7146d6af..613a19f71 100644 --- a/tensorlayer/layers/normalization.py +++ b/tensorlayer/layers/normalization.py @@ -21,6 +21,7 @@ # 'SwitchNorm', # ] + class BatchNorm(Module): """ The :class:`BatchNorm` is a batch normalization layer for both fully-connected and convolution outputs. diff --git a/tensorlayer/models/__init__.py b/tensorlayer/models/__init__.py index 080f9418c..e0b60ca14 100644 --- a/tensorlayer/models/__init__.py +++ b/tensorlayer/models/__init__.py @@ -9,4 +9,4 @@ # from .vgg import * # from .seq2seq import Seq2seq # from .seq2seq_with_attention import Seq2seqLuongAttention -from .core import Model \ No newline at end of file +from .core import Model From dda098196d7e9144bb52e96ccfcc38566fb46946 Mon Sep 17 00:00:00 2001 From: hanjr Date: Fri, 25 Jun 2021 10:38:05 +0800 Subject: [PATCH 19/36] update dataflow and examples --- .../tutorial_SequentialLayer.py | 11 +- .../tutorial_cifar10_cnn_mindspore_backend.py | 9 +- examples/basic_tutorials/tutorial_dataflow.py | 83 ++++ ...tutorial_mnist_mlp_paddlepaddle_backend.py | 17 +- .../basic_tutorials/tutorial_mnist_simple.py | 62 +-- .../tutorial_nested_usage_of_Layer.py | 4 +- tensorlayer/dataflow/__init__.py | 1 - tensorlayer/dataflow/dataflow_examples.py | 56 --- tensorlayer/dataflow/mindspore_data.py | 119 ++---- tensorlayer/dataflow/paddle_data.py | 120 +++--- tensorlayer/dataflow/tensorflow_data.py | 375 ++++++++++++------ tensorlayer/vision/mindspore_vision.py | 53 +-- tensorlayer/vision/paddle_vision.py | 4 +- 13 files changed, 510 insertions(+), 404 deletions(-) create mode 100644 examples/basic_tutorials/tutorial_dataflow.py delete mode 100644 tensorlayer/dataflow/dataflow_examples.py diff --git a/examples/basic_tutorials/tutorial_SequentialLayer.py b/examples/basic_tutorials/tutorial_SequentialLayer.py index 729d5b1c5..b4527bd9b 100644 --- a/examples/basic_tutorials/tutorial_SequentialLayer.py +++ b/examples/basic_tutorials/tutorial_SequentialLayer.py @@ -14,6 +14,7 @@ X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) + def generator_train(): inputs = X_train targets = y_train @@ -22,6 +23,7 @@ def generator_train(): for _input, _target in zip(inputs, targets): yield (_input, np.array(_target)) + n_epoch = 50 batch_size = 128 print_freq = 2 @@ -31,13 +33,12 @@ def generator_train(): # print(train_weights) optimizer = tl.optimizers.Momentum(0.05, 0.9) train_ds = tl.dataflow.FromGenerator( - generator_train, output_types=(tl.float32, tl.int32) , column_names=['data', 'label'] + generator_train, output_types=(tl.float32, tl.int32), column_names=['data', 'label'] ) -train_ds = tl.dataflow.Shuffle(train_ds,shuffle_buffer_size) -train_ds = 
tl.dataflow.Batch(train_ds,batch_size) - +train_ds = tl.dataflow.Shuffle(train_ds, shuffle_buffer_size) +train_ds = tl.dataflow.Batch(train_ds, batch_size) model = tl.models.Model(network=MLP, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer) model.train(n_epoch=n_epoch, train_dataset=train_ds, print_freq=print_freq, print_train_batch=False) model.save_weights('./model.npz', format='npz_dict') -model.load_weights('./model.npz', format='npz_dict') \ No newline at end of file +model.load_weights('./model.npz', format='npz_dict') diff --git a/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py b/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py index 37a45349e..84c7f53f6 100644 --- a/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py +++ b/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py @@ -23,7 +23,9 @@ tl.logging.set_verbosity(tl.logging.DEBUG) tl.logging.set_verbosity(tl.logging.DEBUG) + class CNN(Module): + def __init__(self): super(CNN, self).__init__() self.conv1 = Conv2d(64, (5, 5), (2, 2), b_init=None, name='conv1', in_channels=3, act=tl.ReLU) @@ -37,7 +39,6 @@ def __init__(self): self.dense2 = Dense(84, act=tl.ReLU, name='dense2relu', in_channels=120) self.dense3 = Dense(10, act=None, name='output', in_channels=84) - def forward(self, x): z = self.conv1(x) z = self.bn(z) @@ -50,14 +51,16 @@ def forward(self, x): z = self.dense3(z) return z + # training settings batch_size = 128 n_epoch = 500 shuffle_buffer_size = 128 - # prepare cifar10 data X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) + + def generator_train(): inputs = X_train targets = y_train @@ -76,6 +79,7 @@ def generator_test(): # yield _input.encode('utf-8'), _target.encode('utf-8') yield _input, _target + def _map_fn_train(img, target): # 1. Randomly crop a [height, width] section of the image. img = tf.image.random_crop(img, [24, 24, 3]) @@ -142,4 +146,3 @@ def forward(self, x, label): print(" train loss: {}".format(train_loss / n_iter)) print(" train acc: {}".format(train_acc / n_iter)) print(" loss ", loss) - diff --git a/examples/basic_tutorials/tutorial_dataflow.py b/examples/basic_tutorials/tutorial_dataflow.py new file mode 100644 index 000000000..8ebc09b88 --- /dev/null +++ b/examples/basic_tutorials/tutorial_dataflow.py @@ -0,0 +1,83 @@ +#! 
/usr/bin/python +# -*- coding: utf-8 -*- +import os +os.environ['TL_BACKEND'] = 'tensorflow' +# os.environ['TL_BACKEND'] = 'mindspore' +# os.environ['TL_BACKEND'] = 'paddle' + +import tensorlayer as tl +from tensorlayer.layers import Module +from tensorlayer.layers import Dense, Flatten +from tensorlayer.vision.transforms import Normalize, Compose +from tensorlayer.dataflow import Dataset, IterableDataset + +transform = Compose([Normalize(mean=[127.5], std=[127.5], data_format='HWC')]) + +print('download training data and load training data') + +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) +X_train = X_train * 255 + +print('load finished') + + +class mnistdataset(Dataset): + + def __init__(self, data=X_train, label=y_train, transform=transform): + self.data = data + self.label = label + self.transform = transform + + def __getitem__(self, index): + data = self.data[index].astype('float32') + data = self.transform(data) + label = self.label[index].astype('int64') + + return data, label + + def __len__(self): + + return len(self.data) + + +class mnistdataset1(IterableDataset): + + def __init__(self, data=X_train, label=y_train, transform=transform): + self.data = data + self.label = label + self.transform = transform + + def __iter__(self): + + for i in range(len(self.data)): + data = self.data[i].astype('float32') + data = self.transform(data) + label = self.label[i].astype('int64') + yield data, label + + +class MLP(Module): + + def __init__(self): + super(MLP, self).__init__() + self.linear1 = Dense(n_units=120, in_channels=784, act=tl.ReLU) + self.linear2 = Dense(n_units=84, in_channels=120, act=tl.ReLU) + self.linear3 = Dense(n_units=10, in_channels=84) + self.flatten = Flatten() + + def forward(self, x): + x = self.flatten(x) + x = self.linear1(x) + x = self.linear2(x) + x = self.linear3(x) + return x + + +train_dataset = mnistdataset1(data=X_train, label=y_train, transform=transform) +train_dataset = tl.dataflow.FromGenerator( + train_dataset, output_types=[tl.float32, tl.int64], column_names=['data', 'label'] +) +train_loader = tl.dataflow.Dataloader(train_dataset, batch_size=128, shuffle=False) + +for i in train_loader: + print(i[0].shape, i[1]) diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py b/examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py index 2ab00ba13..e2e8ac8be 100644 --- a/examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py +++ b/examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py @@ -1,19 +1,22 @@ #! 
/usr/bin/python # -*- coding: utf-8 -*- - +# The tensorlayer and Paddle operators can be mixed import os os.environ['TL_BACKEND'] = 'paddle' -# os.environ['TL_BACKEND'] = 'tensorflow' import tensorlayer as tl from tensorlayer.layers import Module from tensorlayer.layers import Dense, Flatten +import paddle +from paddle.io import TensorDataset print('download training data and load training data') X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) print('load finished') +X_train = paddle.to_tensor(X_train.astype('float32')) +y_train = paddle.to_tensor(y_train.astype('int64')) class MLP(Module): @@ -33,14 +36,16 @@ def forward(self, x): return x -traindataset = tl.dataflow.FromSlices((X_train, y_train)) -train_loader = tl.dataflow.Dataloader(traindataset, batch_size=64, shuffle=True) +traindataset = paddle.io.TensorDataset([X_train, y_train]) +train_loader = paddle.io.DataLoader(traindataset, batch_size=64, shuffle=True) net = MLP() optimizer = tl.optimizers.Adam(learning_rate=0.001) metric = tl.metric.Accuracy() -model = tl.models.Model(network=net, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer, metrics=metric) +model = tl.models.Model( + network=net, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer, metrics=metric +) model.train(n_epoch=2, train_dataset=train_loader, print_freq=5, print_train_batch=True) model.save_weights('./model_mlp.npz', format='npz_dict') model.load_weights('./model_mlp.npz', format='npz_dict') -# model.eval(train_loader) \ No newline at end of file +# model.eval(train_loader) diff --git a/examples/basic_tutorials/tutorial_mnist_simple.py b/examples/basic_tutorials/tutorial_mnist_simple.py index e6d4baa9e..61d993064 100644 --- a/examples/basic_tutorials/tutorial_mnist_simple.py +++ b/examples/basic_tutorials/tutorial_mnist_simple.py @@ -3,15 +3,38 @@ # The same set of code can switch the backend with one line import os -os.environ['TL_BACKEND'] = 'tensorflow' +# os.environ['TL_BACKEND'] = 'tensorflow' # os.environ['TL_BACKEND'] = 'mindspore' - +os.environ['TL_BACKEND'] = 'paddle' import numpy as np import tensorlayer as tl from tensorlayer.layers import Module -from tensorlayer.layers import Dense, Dropout +from tensorlayer.layers import Dense, Dropout, Flatten +from tensorlayer.dataflow import Dataset +from tensorlayer.vision.transforms import Normalize, Compose + +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) + +transform = Compose([Normalize(mean=[127.5], std=[127.5], data_format='HWC')]) + + +class mnistdataset(Dataset): + + def __init__(self, data, label, transform): + self.data = data + self.label = label + self.transform = transform + + def __getitem__(self, index): + data = self.data[index].astype('float32') + data = self.transform(data) + label = self.label[index].astype('int64') -X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) + return data, label + + def __len__(self): + + return len(self.data) class CustomModel(Module): @@ -24,9 +47,11 @@ def __init__(self): self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800) self.dropout3 = Dropout(keep=0.8) self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800) + self.flatten = Flatten() def forward(self, x, foo=None): - z = self.dropout1(x) + z = self.flatten(x) + z = self.dropout1(z) z = self.dense1(z) z = self.dropout2(z) z = self.dense2(z) @@ -37,32 +62,23 @@ def forward(self, x, foo=None): return out -def 
generator_train(): - inputs = X_train - targets = y_train - if len(inputs) != len(targets): - raise AssertionError("The length of inputs and targets should be equal") - for _input, _target in zip(inputs, targets): - yield (_input, np.array(_target)) - - MLP = CustomModel() n_epoch = 50 batch_size = 128 print_freq = 2 -shuffle_buffer_size = 128 +train_dataset = mnistdataset(data=X_train, label=y_train, transform=transform) +train_dataset = tl.dataflow.FromGenerator( + train_dataset, output_types=[tl.float32, tl.int64], column_names=['data', 'label'] +) +train_loader = tl.dataflow.Dataloader(train_dataset, batch_size=batch_size, shuffle=True) train_weights = MLP.trainable_weights optimizer = tl.optimizers.Momentum(0.05, 0.9) -train_ds = tl.dataflow.FromGenerator( - generator_train, output_types=(tl.float32, tl.int32) , column_names=['data', 'label'] +metric = tl.metric.Accuracy() +model = tl.models.Model( + network=MLP, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer, metrics=metric ) -train_ds = tl.dataflow.Shuffle(train_ds,shuffle_buffer_size) -train_ds = tl.dataflow.Batch(train_ds,batch_size) - - -model = tl.models.Model(network=MLP, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer) -model.train(n_epoch=n_epoch, train_dataset=train_ds, print_freq=print_freq, print_train_batch=False) +model.train(n_epoch=n_epoch, train_dataset=train_loader, print_freq=print_freq, print_train_batch=False) model.save_weights('./model.npz', format='npz_dict') model.load_weights('./model.npz', format='npz_dict') diff --git a/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py b/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py index 70eca198c..16fb3663c 100644 --- a/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py +++ b/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py @@ -12,7 +12,9 @@ X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) + class Block(Module): + def __init__(self, in_channels): super(Block, self).__init__() self.dense1 = Dense(in_channels=in_channels, n_units=256) @@ -209,4 +211,4 @@ def _map_fn_test(img, target): test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 print(" test loss: {}".format(test_loss / n_iter)) -print(" test acc: {}".format(test_acc / n_iter)) \ No newline at end of file +print(" test acc: {}".format(test_acc / n_iter)) diff --git a/tensorlayer/dataflow/__init__.py b/tensorlayer/dataflow/__init__.py index 6c625acc2..3eb12829c 100644 --- a/tensorlayer/dataflow/__init__.py +++ b/tensorlayer/dataflow/__init__.py @@ -4,7 +4,6 @@ from tensorlayer.backend.ops.load_backend import BACKEND - if BACKEND == 'tensorflow': from .tensorflow_data import * diff --git a/tensorlayer/dataflow/dataflow_examples.py b/tensorlayer/dataflow/dataflow_examples.py deleted file mode 100644 index 2bee24684..000000000 --- a/tensorlayer/dataflow/dataflow_examples.py +++ /dev/null @@ -1,56 +0,0 @@ -#! 
/usr/bin/python -# -*- coding: utf-8 -*- - -import tensorlayer as tl -from tensorlayer.dataflow import Dataset -import numpy as np - -X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) - - -def generator_train(): - inputs = X_train - targets = y_train - if len(inputs) != len(targets): - raise AssertionError("The length of inputs and targets should be equal") - for _input, _target in zip(inputs, targets): - # yield _input.encode('utf-8'), _target.encode('utf-8') - yield (_input, np.array(_target)) - - -batch_size = 128 -shuffle_buffer_size = 128 -n_epoch = 10 - -import tensorflow as tf - - -def _map_fn_train(img, target): - # 1. Randomly crop a [height, width] section of the image. - img = tf.image.random_crop(img, [24, 24, 3]) - # 2. Randomly flip the image horizontally. - img = tf.image.random_flip_left_right(img) - # 3. Randomly change brightness. - img = tf.image.random_brightness(img, max_delta=63) - # 4. Randomly change contrast. - img = tf.image.random_contrast(img, lower=0.2, upper=1.8) - # 5. Subtract off the mean and divide by the variance of the pixels. - img = tf.image.per_image_standardization(img) - target = tf.reshape(target, ()) - return img, target - - -import multiprocessing -train_ds = Dataset.from_generator( - generator=generator_train, output_types=(tl.float32, tl.int32) -) # , output_shapes=((24, 24, 3), (1))) - -train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count()) - -train_ds = train_ds.repeat(n_epoch) -train_ds = train_ds.shuffle(shuffle_buffer_size) -train_ds = train_ds.prefetch(buffer_size=4096) -train_ds = train_ds.batch(batch_size) - -for X_batch, y_batch in train_ds: - print(X_batch.shape, y_batch.shape) diff --git a/tensorlayer/dataflow/mindspore_data.py b/tensorlayer/dataflow/mindspore_data.py index e50c15bfe..9c12d87a7 100644 --- a/tensorlayer/dataflow/mindspore_data.py +++ b/tensorlayer/dataflow/mindspore_data.py @@ -5,31 +5,41 @@ import mindspore as ms from enum import Enum __all__ = [ - 'Apply', 'Batch', 'Concat', - 'Filter', - 'Flat_map', 'FromGenerator', 'FromSlices', 'Map', - 'Prefetch', 'Repeat', 'Shuffle', - 'Skip', - 'Take', 'Dataloader', + 'Dataset', + 'IterableDataset', ] -class shuffle_str(str, Enum): - GLOBAL: str = "global" - FILES: str = "file" +class Dataset(object): + def __init__(self): + pass -def Apply(dataset, transformation_func): + def __getitem__(self, idx): + raise NotImplementedError("'{}' not implemented in class "\ "{}".format('__getitem__', self.__class__.__name__)) - return dataset.apply(transformation_func) + def __len__(self): + raise NotImplementedError("'{}' not implemented in class "\ "{}".format('__len__', self.__class__.__name__)) + + +class IterableDataset(object): + + def __init__(self): + pass + + def __iter__(self): + raise NotImplementedError("'{}' not implemented in class " \ "{}".format('__iter__', self.__class__.__name__)) def Batch(dataset, batch_size, drop_last=False): @@ -47,39 +57,26 @@ def Batch(dataset, batch_size, drop_last=False): return dataset.batch(batch_size=batch_size, drop_remainder=drop_last) -def Concat(dataset_1, dataset_2): - - return dataset_1.concat(dataset_2) - - -def Filter(dataset, predicate): - - return dataset.filter(predicate) - +def Concat(datasets): -def Flat_map(dataset, map_func): - - return dataset.flat_map(map_func) + datasets = list(datasets) + dataset = ds.Dataset.concat(datasets) + return dataset -def FromGenerator(generator, transform = None): +def FromGenerator(generator, output_types,
column_names): - return ds.GeneratorDataset(source=generator, column_names=["data", "label"]) + output_types = list(output_types) + column_names = list(column_names) + return ds.GeneratorDataset(source=generator, column_names=column_names, column_types=output_types) -def FromSlices( - tensor, column_names=None, num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None, - shard_id=None -): +def FromSlices(datas, column_names): - return ds.NumpySlicesDataset( - data=tensor, column_names=column_names, num_samples=num_samples, num_parallel_workers=num_parallel_workers, - shuffle=shuffle, sampler=sampler, num_shards=num_shards, shard_id=shard_id - ) + return ds.NumpySlicesDataset(data=datas, column_names=column_names) -def Map( - dataset, map_func, input_columns=None): +def Map(dataset, map_func, input_columns=None): """ Maps map_func across the elements of this dataset. Parameters @@ -94,14 +91,7 @@ def Map( ------- """ - return dataset.map( - operations=map_func, input_columns=input_columns - ) - - -def Prefetch(dataset, buffer_size): - - return dataset.config.set_prefetch_size(buffer_size) + return dataset.map(operations=map_func, input_columns=input_columns) def Repeat(dataset, count=None): @@ -109,47 +99,11 @@ def Repeat(dataset, count=None): return dataset.repeat(count) -def Shuffle(dataset, buffer_size, seed=None, reshuffle_each_iteration=None): - - #dataset.config.set_seed(seed) +def Shuffle(dataset, buffer_size): return dataset.shuffle(buffer_size) -def Skip(dataset, count): - ''' - Creates a Dataset that skips count elements from this dataset. - Parameters - ---------- - dataset: - A dataset - count: - A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be skipped to form the new dataset. - - - Returns - ------- - - ''' - return dataset.skip(count) - - -def Take(dataset, count): - ''' - Creates a Dataset with at most count elements from this dataset. - Parameters - ---------- - dataset: - A dataset - count: - A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be taken to form the new dataset. - If count is -1, or if count is greater than the size of this dataset, the new dataset will contain all elements of this dataset. - Returns - ------- - - ''' - return dataset.take(count) - def Zip(datasets): ''' Creates a Dataset by zipping together the given datasets. 
@@ -161,15 +115,14 @@ def Zip(datasets): ''' Creates a Dataset by zipping together the given datasets. ------- ''' + datasets = tuple(datasets) return ds.zip(datasets) -def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, prefetch=2, shuffle_buffer_size=10000): - +def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, shuffle_buffer_size=10000): if shuffle: dataset = Shuffle(dataset, buffer_size=shuffle_buffer_size) dataset = Batch(dataset, batch_size=batch_size, drop_last=drop_last) - return dataset diff --git a/tensorlayer/dataflow/paddle_data.py b/tensorlayer/dataflow/paddle_data.py index fac24a33c..d442a8fd7 100644 --- a/tensorlayer/dataflow/paddle_data.py +++ b/tensorlayer/dataflow/paddle_data.py @@ -3,118 +3,96 @@ import numpy as np import paddle -from paddle.io import Dataset, BatchSampler, DataLoader, IterableDataset +from paddle.io import Dataset as dataset +from paddle.io import IterableDataset as iterabledataset +from paddle.io import DataLoader __all__ = [ + 'Batch', 'Concat', 'FromGenerator', 'FromSlices', 'Map', - # 'Shuffle', - # 'Batch', + 'Repeat', + 'Shuffle', 'Dataloader', + 'Dataset', + 'IterableDataset', ] -def to_list(value): - if value is None: - return value - if isinstance(value, (list, tuple)): - return list(value) - return [value] +class Dataset(dataset): + def __init__(self): + pass -class FromGenerator(Dataset): + def __getitem__(self, idx): + raise NotImplementedError("'{}' not implemented in class "\ "{}".format('__getitem__', self.__class__.__name__)) - def __init__(self, generator, transform = None): + def __len__(self): + raise NotImplementedError("'{}' not implemented in class "\ "{}".format('__len__', self.__class__.__name__)) - if not callable(generator): - raise TypeError("'generator' must be callable") - self.generator = generator() - self.transform = transform - self.datas = [] - self.labels = [] - for data, label in self.generator: - self.datas.append(data) - self.labels.append(label) - def __getitem__(self, idx): - x = self.datas[idx] - if self.transform: - x = self.transform(x) - y = self.labels[idx] +class IterableDataset(iterabledataset): - return x, y + def __init__(self): + pass - def __len__(self): + def __iter__(self): + raise NotImplementedError("'{}' not implemented in class "\ "{}".format('__iter__', self.__class__.__name__)) - return len(self.datas) + def __getitem__(self, idx): + raise RuntimeError("'{}' should not be called for IterableDataset" \ "{}".format('__getitem__', self.__class__.__name__)) + def __len__(self): + raise RuntimeError("'{}' should not be called for IterableDataset" \ "{}".format('__len__', self.__class__.__name__)) -class FromSlices(Dataset): - def __init__(self, datas, transform = None): - self.datas = datas[0] - self.labels = datas[1] - self.transform = transform +def FromGenerator(generator, output_types=None, column_names=None): - if len(self.datas) != len(self.labels): - raise ValueError('Datas and labels not have same shape of the 1st dimension.') + return generator - def __getitem__(self, idx): - data = paddle.to_tensor(self.datas[idx], dtype='float32') - label = paddle.to_tensor(self.labels[idx], dtype='int64') - if self.transform is not None: - data = self.transform(data) - return data, label +def FromSlices(datas, column_names=None): - def __len__(self): + datas = list(datas) + return paddle.io.TensorDataset(datas) - return len(self.datas) +def Concat(datasets): -class Concat(IterableDataset): + return paddle.io.ChainDataset(list(datasets)) - def __init__(self, datasets): - self.datasets = list(datasets) - assert len(self.datasets) >
0, "input datasets shoule not be empty" - for i, dataset in enumerate(self.datasets): - assert isinstance(dataset, IterableDataset), \ - "Concat only support paddle.io.IterableDataset" - def __iter__(self): - for dataset in self.datasets: - for sample in dataset: - yield sample +def Zip(datasets): + return paddle.io.ComposeDataset(list(datasets)) -class Map(Dataset): - def __init__(self, dataset, transform): - # self.isDataset = False - self.transform = transform - self.dataset = dataset +def Dataloader(dataset, batch_size=None, shuffle=False, drop_last=False, shuffle_buffer_size=0): + return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last, return_list=True) - def __getitem__(self, idx): - x = self.dataset[idx][0] - # if not isinstance(x, np.ndarray): - # x = np.asarray(x) +def Batch(dataset, batch_size, drop_last=False): - if self.transform: - x = self.transform(x) - y = self.dataset[idx][1] + raise NotImplementedError('This function is not implemented in the Paddle backend.') - return x, y +def Shuffle(dataset, buffer_size, seed=None): + + raise NotImplementedError('This function is not implemented in the Paddle backend.') - def __len__(self): - return len(self.dataset) +def Repeat(dataset, count=None): + raise NotImplementedError('This function is not implemented in the Paddle backend.') -def Dataloader(dataset, batch_size=None, shuffle=False, drop_last=False, prefetch=0, shuffle_buffer_size=0): +def Map(dataset, map_func, input_columns=None): - return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last) + raise NotImplementedError('This function is not implemented in the Paddle backend.') diff --git a/tensorlayer/dataflow/tensorflow_data.py b/tensorlayer/dataflow/tensorflow_data.py index 85c4c593c..312565078 100644 --- a/tensorlayer/dataflow/tensorflow_data.py +++ b/tensorlayer/dataflow/tensorflow_data.py @@ -2,220 +2,357 @@ # -*- coding: utf-8 -*- import tensorflow as tf - +import tensorlayer as tl +import numpy as np __all__ = [ - 'Apply', 'Batch', 'Concat', - 'Filter', - 'Flat_map', 'FromGenerator', 'FromSlices', 'Map', - 'Prefetch', 'Repeat', 'Shuffle', - 'Skip', - 'Take', 'Zip', 'Dataloader', + 'Dataset', + 'IterableDataset', ] -def Apply(dataset, transformation_func): - """Applies a transformation function to this dataset. - `apply` enables chaining of custom `Dataset` transformations, which are - represented as functions that take one `Dataset` argument and return a - transformed `Dataset`. +class Dataset(object): + """An abstract class to encapsulate methods and behaviors of datasets. + All datasets in map-style (dataset samples can be fetched by a given key) should be a subclass of 'tensorlayer.dataflow.Dataset'. + All subclasses should implement the following methods: + :code:`__getitem__`: get a sample from the dataset with a given index. + :code:`__len__`: return the number of samples in the dataset.
+ + Examples + ---------- + With TensorLayer + + >>> from tensorlayer.dataflow import Dataset + >>> class mnistdataset(Dataset): + >>> def __init__(self, data, label,transform): + >>> self.data = data + >>> self.label = label + >>> self.transform = transform + >>> def __getitem__(self, index): + >>> data = self.data[index].astype('float32') + >>> data = self.transform(data) + >>> label = self.label[index].astype('int64') + >>> return data, label + >>> def __len__(self): + >>> return len(self.data) + >>> train_dataset = mnistdataset(data = X_train, label = y_train ,transform = transform) + + """ - [0, 1, 2, 3, 4] - Args: - transformation_func: A function that takes one `Dataset` argument and - returns a `Dataset`. - Returns: - Dataset: The `Dataset` returned by applying `transformation_func` to this - dataset. - """ - return dataset.apply(transformation_func) + def __init__(self): + pass + def __call__(self): -def Batch(dataset, batch_size, drop_last=False): - ''' + return self + + def __getitem__(self, idx): + raise NotImplementedError("'{}' not implemented in class "\ "{}".format('__getitem__', self.__class__.__name__)) + + def __len__(self): + raise NotImplementedError("'{}' not implemented in class "\ "{}".format('__len__', self.__class__.__name__)) + + +class IterableDataset(object): + """An abstract class to encapsulate methods and behaviors of iterable datasets. + All datasets in iterable-style (can only get sample one by one sequentially, like a Python iterator) should be a subclass of `tensorlayer.dataflow.IterableDataset`. + All subclasses should implement the following methods: + :code:`__iter__`: yield samples sequentially. + + Examples + ---------- + With TensorLayer + + >>> class mnistdataset(IterableDataset): + >>> def __init__(self, data, label,transform): + >>> self.data = data + >>> self.label = label + >>> self.transform = transform + >>> def __iter__(self): + >>> for i in range(len(self.data)): + >>> data = self.data[i].astype('float32') + >>> data = self.transform(data) + >>> label = self.label[i].astype('int64') + >>> yield data, label + >>> train_dataset = mnistdataset(data = X_train, label = y_train ,transform = transform) + + """ + + def __init__(self): + pass + + def __call__(self): + + return self + + def __iter__(self): + raise NotImplementedError("'{}' not implemented in class "\ "{}".format('__iter__', self.__class__.__name__)) + + +def FromGenerator(generator, output_types, column_names=None): + """Creates a `Dataset` whose elements are generated by `generator`. Parameters ---------- - dataset - batch_size - drop_remainder + generator: Callable or Iterable + A generator callable object or an iterable Python object. + output_types: list or tuple + Set output data type. This parameter is not supported in the Paddle backend. + column_names: list or tuple + column names of the dataset. This parameter is not supported in the TensorFlow and Paddle backends. Returns ------- + Dataset + A Dataset.
- ''' - return dataset.batch(batch_size=batch_size, drop_remainder=drop_last) + Examples + ---------- + With TensorLayer + >>> train_dataset = mnistdataset(data = X_train, label = y_train ,transform = transform) + >>> train_dataset = tl.dataflow.FromGenerator(train_dataset, output_types=[tl.float32, tl.int64], column_names=['data', 'label']) -def Concat(dataset_1, dataset_2): + """ + output_types = tuple(output_types) + return tf.data.Dataset.from_generator(generator, output_types=output_types) - return dataset_1.concatenate(dataset_2) +def Batch(dataset, batch_size, drop_last=False): + """Combine batch_size number of consecutive rows into batches.This function not implement in Paddle backend. -def Filter(dataset, predicate): - ''' - Filters this dataset according to predicate. Parameters ---------- - dataset : - A dataset - predicate : - A function mapping a dataset element to a boolean. - Returns : - The Dataset containing the elements of this dataset for which predicate is True. + dataset: + A dataset. + batch_size: int + Sample number in a mini-batch. + drop_last: boolean + whether drop the last incomplete batch dataset size is not divisible by the batch size. + + Returns ------- + Dataset + A batchDataset. + """ + + return dataset.batch(batch_size=batch_size, drop_remainder=drop_last) - ''' - return dataset.filter(predicate) +def Concat(datasets): + """Concatenate the datasets in the input list of datasets. -def Flat_map(dataset, map_func): - ''' - Maps map_func across this dataset and flattens the result. Parameters ---------- - dataset: - A dataset - map_func - A function mapping a dataset element to a dataset. + datasets: dataset + A list of datasets. + Returns - A Dataset. ------- + Dataset + datasets concatenated. - ''' - return dataset.flat_map(map_func) - + Examples + ---------- + With TensorLayer -def FromGenerator( - generator, output_types = (tf.float32, tf.int32), column_names=None -): - """Creates a `Dataset` whose elements are generated by `generator`. + >>> dataset = tl.dataflow.Concat([dataset1, dataset2]) - generator: - A callable object """ - return tf.data.Dataset.from_generator(generator, output_types = output_types, output_shapes=None, args=None) + dataset_num = len(datasets) + dataset = datasets[0] + for i in range(1, dataset_num): + dataset.concatenate(datasets[i]) + return dataset -def FromSlices( - tensor, column_names=None, num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None, - shard_id=None -): - return tf.data.Dataset.from_tensor_slices(tensor) +def FromSlices(datas, column_names=None): + """Creates a dataset with given data slices. + Parameters + ---------- + datas: list or tuple + Each data should be in shape of [N, …], while N is the sample number. + Input data will be sliced along the first dimension and generate additional rows + column_names: list + List of column names of the dataset. This parameter not support in TensorFlow backend and Paddle backend. -def Map( - dataset, map_func, input_columns=None, output_columns=None, column_order=None, - num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None -): - """ Maps map_func across the elements of this dataset. + Returns + ------- + Dataset + A dataset. + + Examples + ---------- + With TensorLayer + + >>> dataset = tl.dataflow.FromSlices([data1, data2]) + + """ + + return tf.data.Dataset.from_tensor_slices(datas) + + +def Map(dataset, map_func, input_columns=None): + """ Maps map_func across the elements of this dataset. 
This function is not implemented in the Paddle backend. Parameters ---------- - dataset : DataFlow - input DataFlow + dataset : Dataset + A dataset to map. map_func : function A function mapping a dataset element to another dataset element. - num_parallel_calls + input_columns: list + List of column names of the dataset to map. This parameter is not supported in the TensorFlow backend. Returns ------- + Dataset + A mapped dataset. + + Examples + ---------- + With TensorLayer + + >>> dataset = tl.dataflow.Map(dataset, map_func) """ return dataset.map(map_func) -def Prefetch(dataset, buffer_size): - ''' - Creates a Dataset that prefetches elements from this dataset. +def Repeat(dataset, count=None): + """ Repeat this dataset count times. This function is not implemented in the Paddle backend. + Parameters ---------- - dataset: Dataflow - A dataset - buffer_size : - A tf.int64 scalar tf.Tensor, representing the maximum number of elements that will be buffered when prefetching. + dataset : Dataset + A dataset to repeat. + count : int + The number of times the dataset should be repeated. The default behavior (if count is None or -1) is for the dataset to be repeated indefinitely. + Returns - A Dataset ------- + Dataset + A repeated dataset. - ''' - return dataset.prefetch(buffer_size=buffer_size) + Examples + ---------- + With TensorLayer + >>> dataset = tl.dataflow.Repeat(dataset, 2) -def Repeat(dataset, count=None): + """ return dataset.repeat(count=count) -def Shuffle(dataset, buffer_size, seed=None, reshuffle_each_iteration=True): - return dataset.shuffle(buffer_size, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration) - +def Shuffle(dataset, buffer_size): + """ Randomly shuffles the elements of this dataset. This function is not implemented in the Paddle backend. -def Skip(dataset, count): - ''' - Creates a Dataset that skips count elements from this dataset. Parameters ---------- - dataset: - A dataset - count: - A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be skipped to form the new dataset. - If count is greater than the size of this dataset, the new dataset will contain no elements. - If count is -1, skips the entire dataset. + dataset : Dataset + A dataset to shuffle. + buffer_size : int + The number of elements from this dataset from which the new dataset will sample. Returns ------- + Dataset + A shuffled dataset. - ''' - return dataset.skip(count) + Examples + ---------- + With TensorLayer + >>> dataset = tl.dataflow.Shuffle(dataset, 2000) -def Take(dataset, count): - ''' - Creates a Dataset with at most count elements from this dataset. - Parameters - ---------- - dataset: - A dataset - count: - A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be taken to form the new dataset. - If count is -1, or if count is greater than the size of this dataset, the new dataset will contain all elements of this dataset. - Returns - ------- + """ + return dataset.shuffle(buffer_size, seed=None, reshuffle_each_iteration=True) - ''' - return dataset.take(count) def Zip(datasets): - ''' - Creates a Dataset by zipping together the given datasets. + """ Creates a Dataset by zipping together the given datasets. This function is not implemented in the Paddle backend. + Parameters ---------- - datasets: - A tuple of datasets to be zipped together. + datasets : list + A list of datasets to zip. + Returns ------- + Dataset + A zipped dataset.
+ + Examples + ---------- + With TensorLayer - ''' + >>> dataset = tl.dataflow.Zip([dataset1, dataset2]) + + """ return tf.data.Dataset.zip(datasets) +def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, shuffle_buffer_size=10000): + """ Creates a Dataloader to train a network. We recommend using this function. + + Parameters + ---------- + dataset : Dataset + the dataset to load data from. + batch_size: int or None + sample number in a mini-batch. + shuffle: boolean + whether to shuffle the index order before generating batch indices. + drop_last: boolean + whether to drop the last incomplete batch when the dataset size is not divisible by the batch size. + shuffle_buffer_size: int + The number of elements from this dataset from which the new dataset will sample. This parameter is not supported in the Paddle backend. + + Returns + ------- + DataLoader + an iterable object for data iteration; each element of the generated data is a Tensor. -def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, prefetch=2, shuffle_buffer_size=10000): + Examples + ---------- + With TensorLayer + + >>> from tensorlayer.dataflow import Dataset + >>> class mnistdataset(Dataset): + >>> def __init__(self, data, label,transform): + >>> self.data = data + >>> self.label = label + >>> self.transform = transform + >>> def __getitem__(self, index): + >>> data = self.data[index].astype('float32') + >>> data = self.transform(data) + >>> label = self.label[index].astype('int64') + >>> return data, label + >>> def __len__(self): + >>> return len(self.data) + >>> train_dataset = mnistdataset(data = X_train, label = y_train ,transform = transform) + >>> train_dataset = tl.dataflow.FromGenerator(train_dataset, output_types=[tl.float32, tl.int64], column_names=['data', 'label']) + >>> train_dataloader = tl.dataflow.Dataloader(train_dataset, batch_size=128, shuffle=True, drop_last=False, shuffle_buffer_size=2000) + """ if shuffle: - dataset = Shuffle(dataset, buffer_size=shuffle_buffer_size, reshuffle_each_iteration=True) + dataset = Shuffle(dataset, buffer_size=shuffle_buffer_size) dataset = Batch(dataset, batch_size=batch_size, drop_last=drop_last) - dataset = Prefetch(dataset, buffer_size=prefetch) + dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) return dataset diff --git a/tensorlayer/vision/mindspore_vision.py b/tensorlayer/vision/mindspore_vision.py index bb8cbc9e1..b9d70a260 100644 --- a/tensorlayer/vision/mindspore_vision.py +++ b/tensorlayer/vision/mindspore_vision.py @@ -95,7 +95,7 @@ def to_tensor(image, data_format='HWC'): if not (_is_pil_image(image) or _is_numpy_image(image)): raise TypeError('image should be PIL Image or ndarray. Got {}'.format(type(image))) - image = np.asarray(image) + image = np.asarray(image).astype('float32') if image.ndim == 2: image = image[:, :, None] @@ -104,10 +104,8 @@ def to_tensor(image, data_format='HWC'): image = np.transpose(image, (2, 0, 1)) image = image / 255. - image = ms.Tensor(image, dtype=ms.float32) else: image = image / 255.
- image = ms.Tensor(image, dtype=ms.float32) return image @@ -301,12 +299,10 @@ def padtoboundingbox(image, offset_height, offset_width, target_height, target_w def normalize(image, mean, std, data_format): - if not _is_tensor_image(image): - if _is_pil_image(image): - image = np.asarray(image) - image = ms.Tensor(image) + if _is_pil_image(image): + image = np.asarray(image) - image = image.astype('float32', copy=False) + image = image.astype('float32') if data_format == 'CHW': num_channels = image.shape[0] @@ -323,17 +319,13 @@ def normalize(image, mean, std, data_format): elif isinstance(std, (list, tuple)): if len(std) != num_channels: raise ValueError("Length of std must be 1 or equal to the number of channels({0}).".format(num_channels)) + mean = np.array(mean, dtype=image.dtype) + std = np.array(std, dtype=image.dtype) if data_format == 'CHW': - std = np.array(std).reshape((-1, 1, 1)) - mean = np.array(mean).reshape((-1, 1, 1)) + image = (image - mean[:, None, None]) / std[:, None, None] elif data_format == 'HWC': - mean = np.array(mean).reshape((1, 1, -1)) - std = np.array(std).reshape((1, 1, -1)) - - std = ms.Tensor(std, dtype=ms.float32) - mean = ms.Tensor(mean, dtype=ms.float32) - image = (image - mean) / std + image = (image - mean[None, None, :]) / std[None, None, :] return image @@ -343,24 +335,17 @@ def standardize(image): Reference to tf.image.per_image_standardization(). Linearly scales each image in image to have mean 0 and variance 1. ''' - if not _is_tensor_image(image): - if _is_pil_image(image): - image = np.asarray(image) - image = ms.Tensor(image) - - image = image.astype('float32', copy=False) - - num_pixels = ms.Tensor(image.size).astype('float32', copy=False) - image_mean_ops = ms.ops.ReduceMean(keep_dims=False) - image_mean = image_mean_ops(image) - image_mean = image_mean.reshape((1, 1, 1)) - stddev = std(image) - stddev = stddev.reshape((1, 1, 1)) - sqrt = P.Sqrt() - min_stddev = 1.0 / sqrt(num_pixels) - min_stddev = min_stddev.reshape((1, 1, 1)) - std_max = P.Maximum() - adjusted_stddev = std_max(stddev, min_stddev) + + if _is_pil_image(image): + image = np.asarray(image) + + image = image.astype('float32') + + num_pixels = image.size + image_mean = np.mean(image, keepdims=False) + stddev = np.std(image, keepdims=False) + min_stddev = 1.0 / np.sqrt(num_pixels) + adjusted_stddev = np.maximum(stddev, min_stddev) return (image - image_mean) / adjusted_stddev diff --git a/tensorlayer/vision/paddle_vision.py b/tensorlayer/vision/paddle_vision.py index df6fc230f..4c188efe4 100644 --- a/tensorlayer/vision/paddle_vision.py +++ b/tensorlayer/vision/paddle_vision.py @@ -314,8 +314,8 @@ def normalize(image, mean, std, data_format): std = np.array(std).reshape((-1, 1, 1)) mean = np.array(mean).reshape((-1, 1, 1)) elif data_format == 'HWC': - mean = np.array(mean).reshape((1, 1, -1)) - std = np.array(std).reshape((1, 1, -1)) + mean = np.array(mean) + std = np.array(std) mean = paddle.to_tensor(mean).astype('float32') std = paddle.to_tensor(std).astype('float32') From d15b6e71be3e908d9f05c82334a4ced944be9eac Mon Sep 17 00:00:00 2001 From: hanjr Date: Fri, 25 Jun 2021 10:40:58 +0800 Subject: [PATCH 20/36] add dataflow doc --- docs/index.rst | 1 + docs/modules/dataflow.rst | 79 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+) create mode 100644 docs/modules/dataflow.rst diff --git a/docs/index.rst b/docs/index.rst index 6bab309da..27cac3d66 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -57,6 +57,7 @@ method, this part of the documentation is
for you. modules/activation modules/array_ops modules/cost + modules/dataflow modules/prepro modules/files modules/iterate diff --git a/docs/modules/dataflow.rst b/docs/modules/dataflow.rst new file mode 100644 index 000000000..5ffcc5656 --- /dev/null +++ b/docs/modules/dataflow.rst @@ -0,0 +1,79 @@ +API - Dataflow +============== + +.. automodule:: tensorlayer.dataflow + +.. ----------------------------------------------------------- +.. Dataflow List +.. ----------------------------------------------------------- + +Dataflow list +---------------------- + +.. autosummary:: + + Dataset + IterableDataset + FromGenerator + FromSlices + Dataloader + + Concat + Zip + Batch + Map + Repeat + Shuffle + +.. ----------------------------------------------------------- +.. Dataflow +.. ----------------------------------------------------------- + +Dataflow +----------------- + +Dataset +^^^^^^^^^^^^^^^^ +.. autoclass:: Dataset + + +IterableDataset
^^^^^^^^^^^^^^^^ +.. autoclass:: IterableDataset + +FromGenerator +^^^^^^^^^^^^^^^^ +.. autoclass:: FromGenerator + +FromSlices +^^^^^^^^^^^^^^^^ +.. autoclass:: FromSlices + +Dataloader +^^^^^^^^^^^^^^^^ +.. autoclass:: Dataloader + +Concat +^^^^^^^^^^^^^^^^ +.. autoclass:: Concat + +Zip +^^^^^^^^^^^^^^^^ +.. autoclass:: Zip + +Batch +^^^^^^^^^^^^^^^^ +.. autoclass:: Batch + +Map +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: Map + +Repeat +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: Repeat + +Shuffle +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: Shuffle + From 4b5f7025c6755b3341cf740a1678e8fe2f42dac8 Mon Sep 17 00:00:00 2001 From: Eric Lai Date: Fri, 25 Jun 2021 11:09:00 +0800 Subject: [PATCH 21/36] update docs --- docs/user/contributing.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/user/contributing.rst b/docs/user/contributing.rst index 43576176a..64c8354e3 100644 --- a/docs/user/contributing.rst +++ b/docs/user/contributing.rst @@ -4,8 +4,8 @@ Contributing =============== -TensorLayer 2.0 is a major ongoing research project in CFCS, Peking University, the first version was established at Imperial College London in 2016. The goal of the project is to develop a compositional language while complex learning systems -can be built through composition of neural network modules. +TensorLayer 3.0 is a major ongoing research project in Peking University and Pengcheng Laboratory; the first version was established at Imperial College London in 2016. The goal of the project is to develop a compositional language that is compatible with multiple deep learning frameworks, +while complex learning systems can be built through composition of neural network modules. Numerous contributors come from various horizons such as: Imperial College London, Tsinghua University, Carnegie Mellon University, Stanford, University of Technology of Compiegne, Google, Microsoft, Bloomberg and etc.
From 4b5f7025c6755b3341cf740a1678e8fe2f42dac8 Mon Sep 17 00:00:00 2001
From: Eric Lai
Date: Fri, 25 Jun 2021 11:09:00 +0800
Subject: [PATCH 21/36] update docs

---
 docs/user/contributing.rst | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/user/contributing.rst b/docs/user/contributing.rst
index 43576176a..64c8354e3 100644
--- a/docs/user/contributing.rst
+++ b/docs/user/contributing.rst
@@ -4,8 +4,8 @@
 Contributing
 ===============

-TensorLayer 2.0 is a major ongoing research project in CFCS, Peking University, the first version was established at Imperial College London in 2016. The goal of the project is to develop a compositional language while complex learning systems
-can be built through composition of neural network modules.
+TensorLayer 3.0 is a major ongoing research project at Peking University and Pengcheng Laboratory; the first version was established at Imperial College London in 2016. The goal of the project is to develop a compositional language that is compatible with multiple deep learning frameworks,
+so that complex learning systems can be built through composition of neural network modules.

 Numerous contributors come from various horizons such as: Imperial College London, Tsinghua University, Carnegie Mellon University, Stanford, University of Technology of Compiegne, Google, Microsoft, Bloomberg, etc.
@@ -27,9 +27,9 @@ The TensorLayer project was started by `Hao Dong `_

 For TensorLayer 3.x, it is now actively developed and maintained by the following people *(in alphabetical order)*:

-- **Cheng Lai** (`@Laicheng0830 `_) - ``_
+- **Cheng Lai** (`@Laicheng0830 `_) - ``_
 - **Hao Dong** (`@zsdonghao `_) - ``_
-- **Jiarong Han** (`@hanjr92 `_) - ``_
+- **Jiarong Han** (`@hanjr92 `_) - ``_

 For TensorLayer 2.x, it is actively developed and maintained by the following people who have made more than 50 contributions:

From 0fc72f12443cef59f0e9e2cb6c7fd608cbd87249 Mon Sep 17 00:00:00 2001
From: hanjr
Date: Fri, 25 Jun 2021 14:31:59 +0800
Subject: [PATCH 22/36] fix tutorial_mnist_simple.py

---
 .../basic_tutorials/tutorial_mnist_simple.py  | 28 ++++++++-----------
 1 file changed, 12 insertions(+), 16 deletions(-)

diff --git a/examples/basic_tutorials/tutorial_mnist_simple.py b/examples/basic_tutorials/tutorial_mnist_simple.py
index 61d993064..c72464646 100644
--- a/examples/basic_tutorials/tutorial_mnist_simple.py
+++ b/examples/basic_tutorials/tutorial_mnist_simple.py
@@ -3,9 +3,9 @@
 # The same set of code can switch the backend with one line
 import os
-# os.environ['TL_BACKEND'] = 'tensorflow'
+os.environ['TL_BACKEND'] = 'tensorflow'
 # os.environ['TL_BACKEND'] = 'mindspore'
-os.environ['TL_BACKEND'] = 'paddle'
+# os.environ['TL_BACKEND'] = 'paddle'
 import numpy as np
 import tensorlayer as tl
 from tensorlayer.layers import Module
@@ -15,12 +15,11 @@

 X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))

-transform = Compose([Normalize(mean=[127.5], std=[127.5], data_format='HWC')])
-
+transform = Compose([Normalize(mean=[127.5/255.], std=[127.5/255.], data_format='HWC')])

 class mnistdataset(Dataset):

-    def __init__(self, data, label, transform):
+    def __init__(self, data = X_train, label = y_train ,transform = transform):
         self.data = data
         self.label = label
         self.transform = transform
@@ -61,24 +60,21 @@ def forward(self, x, foo=None):
         out = tl.ops.relu(out)
         return out

-
 MLP = CustomModel()
 n_epoch = 50
 batch_size = 128
 print_freq = 2

-train_dataset = mnistdataset(data=X_train, label=y_train, transform=transform)
-train_dataset = tl.dataflow.FromGenerator(
-    train_dataset, output_types=[tl.float32, tl.int64], column_names=['data', 'label']
-)
-train_loader = tl.dataflow.Dataloader(train_dataset, batch_size=batch_size, shuffle=True)
+
 train_weights = MLP.trainable_weights
-optimizer = tl.optimizers.Momentum(0.05, 0.9)
+optimizer = tl.optimizers.Momentum(0.001, 0.9)
 metric = tl.metric.Accuracy()
+train_dataset = mnistdataset(data = X_train, label = y_train ,transform = transform)
+train_dataset = tl.dataflow.FromGenerator(train_dataset, output_types=[tl.float32, tl.int64], column_names=['data', 'label'])
+train_loader = tl.dataflow.Dataloader(train_dataset, batch_size=batch_size, shuffle=True)
+
-model = tl.models.Model(
-    network=MLP, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer, metrics=metric
-)
+model = tl.models.Model(network=MLP, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer, metrics=metric)

 model.train(n_epoch=n_epoch, train_dataset=train_loader, print_freq=print_freq, print_train_batch=False)
 model.save_weights('./model.npz', format='npz_dict')
-model.load_weights('./model.npz', format='npz_dict')
+model.load_weights('./model.npz', format='npz_dict')
\ No newline at end of file
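The change from `mean=[127.5]` to `mean=[127.5/255.]` pairs the transform with inputs in [0, 1]; assuming the MNIST loader returns [0, 1]-scaled floats, the transform recenters pixels to roughly [-1, 1]. A quick check of the arithmetic under that assumption:

.. code-block:: python

    import numpy as np

    pixels = np.array([0.0, 0.5, 1.0], dtype='float32')  # [0, 1]-scaled inputs
    mean = std = 127.5 / 255.                            # == 0.5
    print((pixels - mean) / std)                         # -> [-1.  0.  1.]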
From 9f0136d2c67906cca9f03987144bcc6c68bc1637 Mon Sep 17 00:00:00 2001
From: Eric Lai
Date: Fri, 25 Jun 2021 15:06:22 +0800
Subject: [PATCH 23/36] update paddle backend

---
 tensorlayer/backend/ops/paddle_nn.py |  6 +++++-
 tensorlayer/models/core.py           | 10 +++++-----
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/tensorlayer/backend/ops/paddle_nn.py b/tensorlayer/backend/ops/paddle_nn.py
index 0f28537a9..c662e400e 100644
--- a/tensorlayer/backend/ops/paddle_nn.py
+++ b/tensorlayer/backend/ops/paddle_nn.py
@@ -338,7 +338,11 @@ def __init__(self, keep, seed=1):
         self.seed = seed

     def __call__(self, inputs):
-        raise NotImplementedError
+        output = F.dropout(
+            inputs,
+            p=1 - self.keep,  # paddle's `p` is the drop probability; TL's `keep` is the keep probability
+            mode='upscale_in_train')
+        return output


 class BiasAdd(object):
diff --git a/tensorlayer/models/core.py b/tensorlayer/models/core.py
index 42eb595b2..1d016cc25 100644
--- a/tensorlayer/models/core.py
+++ b/tensorlayer/models/core.py
@@ -30,9 +30,9 @@ class Model:
         The training or testing network.
     loss_fn : function
         Objective function
-    optimizer : function
+    optimizer : class
         Optimizer for updating the weights
-    metrics : function
+    metrics : class
         Dict or set of metrics to be evaluated by the model during training.

     Methods
@@ -65,7 +65,7 @@ class Model:
     >>>         return out
     >>>
     >>> net = Net()
-    >>> loss = tl.cost.cross_entropy
+    >>> loss = tl.cost.softmax_cross_entropy_with_logits
     >>> optim = tl.optimizers.Momentum(params=net.trainable_weights, learning_rate=0.1, momentum=0.9)
     >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
     >>> dataset = get_dataset()
@@ -150,7 +150,7 @@ def save_weights(self, file_path, format=None):
         >>> net = vgg16()
         >>> optimizer = tl.optimizers.Adam(learning_rate=0.001)
         >>> metric = tl.metric.Accuracy()
-        >>> model = tl.models.Model(network=net, loss_fn=tl.cost.cross_entropy, optimizer=optimizer, metrics=metric)
+        >>> model = tl.models.Model(network=net, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer, metrics=metric)
         >>> model.save_weights('./model.h5')
         ...
>>> model.load_weights('./model.h5') @@ -195,7 +195,7 @@ def load_weights(self, file_path, format=None, in_order=True, skip=False): >>> net = vgg16() >>> optimizer = tl.optimizers.Adam(learning_rate=0.001) >>> metric = tl.metric.Accuracy() - >>> model = tl.models.Model(network=net, loss_fn=tl.cost.cross_entropy, optimizer=optimizer, metrics=metric) + >>> model = tl.models.Model(network=net, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer, metrics=metric) >>> model.load_weights('./model_graph.h5', in_order=False, skip=True) # load weights by name, skipping mismatch >>> model.load_weights('./model_eager.h5') # load sequentially From 081a2b85a103765f42b5487eb25a01e8c6af6c35 Mon Sep 17 00:00:00 2001 From: Eric Lai Date: Fri, 25 Jun 2021 15:17:41 +0800 Subject: [PATCH 24/36] yapf --- examples/basic_tutorials/tutorial_SequentialLayer.py | 2 +- .../tutorial_automatic_inference_input _shape.py | 2 +- .../tutorial_cifar10_cnn_mindspore_backend.py | 2 +- .../tutorial_cifar10_cnn_tensorflow_backend.py | 2 +- examples/basic_tutorials/tutorial_dataflow.py | 1 + .../tutorial_mnist_mlp_mindspore_backend.py | 10 ++++------ .../tutorial_mnist_mlp_paddlepaddle_backend.py | 1 + .../tutorial_mnist_mlp_tensorflow_backend.py | 2 +- examples/basic_tutorials/tutorial_mnist_simple.py | 6 +++--- .../basic_tutorials/tutorial_nested_usage_of_Layer.py | 2 +- 10 files changed, 15 insertions(+), 15 deletions(-) diff --git a/examples/basic_tutorials/tutorial_SequentialLayer.py b/examples/basic_tutorials/tutorial_SequentialLayer.py index b4527bd9b..6900093ed 100644 --- a/examples/basic_tutorials/tutorial_SequentialLayer.py +++ b/examples/basic_tutorials/tutorial_SequentialLayer.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#! /usr/bin/python # -*- coding: utf-8 -*- from tensorlayer.layers import SequentialLayer diff --git a/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py b/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py index 6cc8fbb01..5eaec760b 100644 --- a/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py +++ b/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#! /usr/bin/python # -*- coding: utf-8 -*- import numpy as np diff --git a/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py b/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py index 84c7f53f6..059b15620 100644 --- a/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py +++ b/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#! /usr/bin/python # -*- coding: utf-8 -*- import os diff --git a/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py b/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py index edfa9f8a4..8283cffa3 100644 --- a/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py +++ b/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#! /usr/bin/python # -*- coding: utf-8 -*- # The tensorlayer and tensorflow operators can be mixed diff --git a/examples/basic_tutorials/tutorial_dataflow.py b/examples/basic_tutorials/tutorial_dataflow.py index 8ebc09b88..57e1cd207 100644 --- a/examples/basic_tutorials/tutorial_dataflow.py +++ b/examples/basic_tutorials/tutorial_dataflow.py @@ -1,5 +1,6 @@ #! 
/usr/bin/python # -*- coding: utf-8 -*- + import os os.environ['TL_BACKEND'] = 'tensorflow' # os.environ['TL_BACKEND'] = 'mindspore' diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_mindspore_backend.py b/examples/basic_tutorials/tutorial_mnist_mlp_mindspore_backend.py index e4802211e..60e32d0b4 100644 --- a/examples/basic_tutorials/tutorial_mnist_mlp_mindspore_backend.py +++ b/examples/basic_tutorials/tutorial_mnist_mlp_mindspore_backend.py @@ -1,12 +1,10 @@ -#!/usr/bin/env python3 +#! /usr/bin/python # -*- coding: utf-8 -*- -import mindspore.nn as nn + import mindspore.ops.operations as P from mindspore.ops import composite as C -from mindspore.common import dtype as mstype -from mindspore import context, Tensor, ParameterTuple -from mindspore.common.initializer import TruncatedNormal -from mindspore.nn import SoftmaxCrossEntropyWithLogits, Momentum, WithLossCell +from mindspore import ParameterTuple +from mindspore.nn import Momentum, WithLossCell import numpy as np import tensorlayer as tl diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py b/examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py index e2e8ac8be..c93cc87ed 100644 --- a/examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py +++ b/examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py @@ -1,6 +1,7 @@ #! /usr/bin/python # -*- coding: utf-8 -*- # The tensorlayer and Paddle operators can be mixed + import os os.environ['TL_BACKEND'] = 'paddle' diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py b/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py index d959eea36..8833b5791 100644 --- a/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py +++ b/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#! /usr/bin/python # -*- coding: utf-8 -*- # The tensorlayer and tensorflow operators can be mixed diff --git a/examples/basic_tutorials/tutorial_mnist_simple.py b/examples/basic_tutorials/tutorial_mnist_simple.py index c72464646..8401f27ac 100644 --- a/examples/basic_tutorials/tutorial_mnist_simple.py +++ b/examples/basic_tutorials/tutorial_mnist_simple.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#! /usr/bin/python # -*- coding: utf-8 -*- # The same set of code can switch the backend with one line @@ -6,7 +6,7 @@ os.environ['TL_BACKEND'] = 'tensorflow' # os.environ['TL_BACKEND'] = 'mindspore' # os.environ['TL_BACKEND'] = 'paddle' -import numpy as np + import tensorlayer as tl from tensorlayer.layers import Module from tensorlayer.layers import Dense, Dropout, Flatten @@ -68,7 +68,7 @@ def forward(self, x, foo=None): train_weights = MLP.trainable_weights -optimizer = tl.optimizers.Momentum(0.001, 0.9) +optimizer = tl.optimizers.Momentum(0.05, 0.9) metric = tl.metric.Accuracy() train_dataset = mnistdataset(data = X_train, label = y_train ,transform = transform) train_dataset = tl.dataflow.FromGenerator(train_dataset, output_types=[tl.float32, tl.int64], column_names=['data', 'label']) diff --git a/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py b/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py index 16fb3663c..faedf1029 100644 --- a/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py +++ b/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#! 
/usr/bin/python # -*- coding: utf-8 -*- import time From f3f1ac8a24fc33c42a18825b0100240b29c9a5eb Mon Sep 17 00:00:00 2001 From: Eric Lai Date: Fri, 25 Jun 2021 15:23:22 +0800 Subject: [PATCH 25/36] fix examples --- examples/basic_tutorials/tutorial_mnist_simple.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/basic_tutorials/tutorial_mnist_simple.py b/examples/basic_tutorials/tutorial_mnist_simple.py index 8401f27ac..7dc7147a0 100644 --- a/examples/basic_tutorials/tutorial_mnist_simple.py +++ b/examples/basic_tutorials/tutorial_mnist_simple.py @@ -68,7 +68,7 @@ def forward(self, x, foo=None): train_weights = MLP.trainable_weights -optimizer = tl.optimizers.Momentum(0.05, 0.9) +optimizer = tl.optimizers.Momentum(0.001, 0.9) metric = tl.metric.Accuracy() train_dataset = mnistdataset(data = X_train, label = y_train ,transform = transform) train_dataset = tl.dataflow.FromGenerator(train_dataset, output_types=[tl.float32, tl.int64], column_names=['data', 'label']) From 2006b6ce0aecbeac9a0141fca7ed26a0166d28c8 Mon Sep 17 00:00:00 2001 From: Eric Lai Date: Fri, 25 Jun 2021 15:38:22 +0800 Subject: [PATCH 26/36] fix cost --- tensorlayer/cost/mindspore_cost.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorlayer/cost/mindspore_cost.py b/tensorlayer/cost/mindspore_cost.py index 1f2e5ba7b..7ad7dab0b 100644 --- a/tensorlayer/cost/mindspore_cost.py +++ b/tensorlayer/cost/mindspore_cost.py @@ -51,7 +51,7 @@ def softmax_cross_entropy_with_logits(output, target): """ - outputs = nn.SoftmaxCrossEntropyWithLogits(sparse=True)(output, target) + outputs = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')(output, target) return outputs From de963e2af95e6e812a981e37dce2b3ab5656efa9 Mon Sep 17 00:00:00 2001 From: Eric Lai Date: Mon, 28 Jun 2021 16:25:06 +0800 Subject: [PATCH 27/36] update examples --- .../tutorial_SequentialLayer.py | 2 ++ ...torial_automatic_inference_input _shape.py | 2 ++ ...tutorial_cifar10_cnn_tensorflow_backend.py | 2 ++ .../tutorial_mnist_mlp_mindspore_backend.py | 2 ++ .../tutorial_mnist_mlp_tensorflow_backend.py | 2 ++ .../basic_tutorials/tutorial_mnist_simple.py | 21 ++++++-------- .../tutorial_nested_usage_of_Layer.py | 2 ++ tensorlayer/cost/mindspore_cost.py | 28 +------------------ 8 files changed, 21 insertions(+), 40 deletions(-) diff --git a/examples/basic_tutorials/tutorial_SequentialLayer.py b/examples/basic_tutorials/tutorial_SequentialLayer.py index 6900093ed..1780d3fb2 100644 --- a/examples/basic_tutorials/tutorial_SequentialLayer.py +++ b/examples/basic_tutorials/tutorial_SequentialLayer.py @@ -1,5 +1,7 @@ #! /usr/bin/python # -*- coding: utf-8 -*- +import os +os.environ['TL_BACKEND'] = 'tensorflow' from tensorlayer.layers import SequentialLayer from tensorlayer.layers import Dense diff --git a/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py b/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py index 5eaec760b..3318b6982 100644 --- a/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py +++ b/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py @@ -1,5 +1,7 @@ #! 
/usr/bin/python # -*- coding: utf-8 -*- +import os +os.environ['TL_BACKEND'] = 'tensorflow' import numpy as np import time diff --git a/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py b/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py index 8283cffa3..97acb447c 100644 --- a/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py +++ b/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py @@ -1,6 +1,8 @@ #! /usr/bin/python # -*- coding: utf-8 -*- # The tensorlayer and tensorflow operators can be mixed +import os +os.environ['TL_BACKEND'] = 'tensorflow' import time import numpy as np diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_mindspore_backend.py b/examples/basic_tutorials/tutorial_mnist_mlp_mindspore_backend.py index 60e32d0b4..d23d785d1 100644 --- a/examples/basic_tutorials/tutorial_mnist_mlp_mindspore_backend.py +++ b/examples/basic_tutorials/tutorial_mnist_mlp_mindspore_backend.py @@ -1,5 +1,7 @@ #! /usr/bin/python # -*- coding: utf-8 -*- +import os +os.environ['TL_BACKEND'] = 'mindspore' import mindspore.ops.operations as P from mindspore.ops import composite as C diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py b/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py index 8833b5791..2ed6771db 100644 --- a/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py +++ b/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py @@ -1,6 +1,8 @@ #! /usr/bin/python # -*- coding: utf-8 -*- # The tensorlayer and tensorflow operators can be mixed +import os +os.environ['TL_BACKEND'] = 'tensorflow' import numpy as np import time diff --git a/examples/basic_tutorials/tutorial_mnist_simple.py b/examples/basic_tutorials/tutorial_mnist_simple.py index 7dc7147a0..20847d994 100644 --- a/examples/basic_tutorials/tutorial_mnist_simple.py +++ b/examples/basic_tutorials/tutorial_mnist_simple.py @@ -3,30 +3,27 @@ # The same set of code can switch the backend with one line import os -os.environ['TL_BACKEND'] = 'tensorflow' +# os.environ['TL_BACKEND'] = 'tensorflow' # os.environ['TL_BACKEND'] = 'mindspore' -# os.environ['TL_BACKEND'] = 'paddle' +os.environ['TL_BACKEND'] = 'paddle' import tensorlayer as tl from tensorlayer.layers import Module from tensorlayer.layers import Dense, Dropout, Flatten from tensorlayer.dataflow import Dataset -from tensorlayer.vision.transforms import Normalize, Compose -X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) -transform = Compose([Normalize(mean=[127.5/255.], std=[127.5/255.], data_format='HWC')]) +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) + class mnistdataset(Dataset): - def __init__(self, data = X_train, label = y_train ,transform = transform): + def __init__(self, data = X_train, label = y_train): self.data = data self.label = label - self.transform = transform def __getitem__(self, index): data = self.data[index].astype('float32') - data = self.transform(data) label = self.label[index].astype('int64') return data, label @@ -46,11 +43,9 @@ def __init__(self): self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800) self.dropout3 = Dropout(keep=0.8) self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800) - self.flatten = Flatten() def forward(self, x, foo=None): - z = self.flatten(x) - z = self.dropout1(z) + z = self.dropout1(x) z = self.dense1(z) z = self.dropout2(z) z = self.dense2(z) @@ -68,9 +63,9 @@ def forward(self, 
x, foo=None): train_weights = MLP.trainable_weights -optimizer = tl.optimizers.Momentum(0.001, 0.9) +optimizer = tl.optimizers.Momentum(0.05, 0.9) metric = tl.metric.Accuracy() -train_dataset = mnistdataset(data = X_train, label = y_train ,transform = transform) +train_dataset = mnistdataset(data = X_train, label = y_train) train_dataset = tl.dataflow.FromGenerator(train_dataset, output_types=[tl.float32, tl.int64], column_names=['data', 'label']) train_loader = tl.dataflow.Dataloader(train_dataset, batch_size=batch_size, shuffle=True) diff --git a/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py b/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py index faedf1029..24c3574dd 100644 --- a/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py +++ b/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py @@ -1,5 +1,7 @@ #! /usr/bin/python # -*- coding: utf-8 -*- +import os +os.environ['TL_BACKEND'] = 'tensorflow' import time import numpy as np diff --git a/tensorlayer/cost/mindspore_cost.py b/tensorlayer/cost/mindspore_cost.py index 7ad7dab0b..d2d054b10 100644 --- a/tensorlayer/cost/mindspore_cost.py +++ b/tensorlayer/cost/mindspore_cost.py @@ -26,33 +26,7 @@ ] -def softmax_cross_entropy_with_logits(output, target): - """Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions, - it implements softmax internally. See ``tf.ops.sparse_softmax_cross_entropy_with_logits``. - - Parameters - ---------- - output : Tensor - A batch of distribution with shape: [batch_size, num of classes]. - target : Tensor - A batch of index with shape: [batch_size, ]. - name : string - Name of this loss. - - Examples - -------- - >>> import tensorlayer as tl - >>> ce = tl.cost.softmax_cross_entropy_with_logits(y_logits, y_target_logits, 'my_loss') - - References - ----------- - - About cross-entropy: ``__. - - The code is borrowed from: ``__. 
-
-    """
-
-    outputs = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')(output, target)
-    return outputs
+softmax_cross_entropy_with_logits = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')


 def sigmoid_cross_entropy(output, target, name=None):

From f23ad19ca888a589165adb95eb55a1aa4ee94b85 Mon Sep 17 00:00:00 2001
From: Eric Lai
Date: Mon, 28 Jun 2021 16:31:38 +0800
Subject: [PATCH 28/36] update docs

---
 README.md                  | 4 ++--
 docs/user/installation.rst | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index b3fb68630..28f38b7f9 100644
--- a/README.md
+++ b/README.md
@@ -99,12 +99,12 @@ pip3 install --upgrade tensorlayer[all]              # all additional dependenci
 pip3 install --upgrade tensorlayer[extra]            # only the `extra` dependencies
 pip3 install --upgrade tensorlayer[contrib_loggers]  # only the `contrib_loggers` dependencies
 ```
-If you want to use mindspore backend, you should install mindspore>=1.2.0
+If you want to use mindspore backend, you should install mindspore>=1.2.1
 ```bash
 pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.2.1/MindSpore/gpu/ubuntu_x86/cuda-10.1/mindspore_gpu-1.2.1-cp37-cp37m-linux_x86_64.whl --trusted-host ms-release.obs.cn-north-4.myhuaweicloud.com -i https://pypi.tuna.tsinghua.edu.cn/simple
 ```
-If you want to use paddlepaddle backend, you should install paddlepaddle>=2.0
+If you want to use paddlepaddle backend, you should install paddlepaddle>=2.1.1
 ```bash
 python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
 ```
diff --git a/docs/user/installation.rst b/docs/user/installation.rst
index 06d08fba9..d7e88cc70 100644
--- a/docs/user/installation.rst
+++ b/docs/user/installation.rst
@@ -29,14 +29,14 @@ TensorLayer supports multiple deep learning backends, default TensorFlow as back
 The installation instructions of TensorFlow are written to be very detailed on the `TensorFlow`_ website. However, there are some things that need to be considered. For example, `TensorFlow`_ officially supports GPU acceleration for Linux, Mac OS X and Windows at present. For the ARM processor architecture, you need to install TensorFlow from source.

-If you want to use mindspore backend, you should install mindspore==1.2.0.
+If you want to use mindspore backend, you should install mindspore>=1.2.1.

 .. code-block:: bash

   pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.2.1/MindSpore/gpu/ubuntu_x86/cuda-10.1/mindspore_gpu-1.2.1-cp37-cp37m-linux_x86_64.whl --trusted-host ms-release.obs.cn-north-4.myhuaweicloud.com -i https://pypi.tuna.tsinghua.edu.cn/simple

-If you want to use paddlepaddle backend, you should install paddlepaddle==2.0.
+If you want to use paddlepaddle backend, you should install paddlepaddle>=2.1.1.

 .. code-block:: bash
From 41500d77a216ff83b996c7a3a39bc5dbad198f2b Mon Sep 17 00:00:00 2001
From: Eric Lai
Date: Wed, 30 Jun 2021 10:59:32 +0800
Subject: [PATCH 29/36] update backend

---
 docs/modules/cost.rst                         |  9 +---
 docs/user/get_start_model.rst                 |  6 +--
 tensorlayer/backend/ops/load_backend.py       |  1 +
 tensorlayer/backend/ops/paddle_nn.py          | 47 +++++++++++++++----
 tensorlayer/initializers/__init__.py          |  2 +-
 tensorlayer/layers/__init__.py                |  2 +-
 .../layers/convolution/simplified_conv.py     |  4 +-
 tensorlayer/layers/core/core_paddle.py        | 12 +++++
 8 files changed, 59 insertions(+), 24 deletions(-)

diff --git a/docs/modules/cost.rst b/docs/modules/cost.rst
index eba52f4ca..6277b9d71 100644
--- a/docs/modules/cost.rst
+++ b/docs/modules/cost.rst
@@ -11,7 +11,7 @@ we can. So we encourage you to use TensorFlow's function, , see `TensorFlow API

 .. autosummary::

-   cross_entropy
+   softmax_cross_entropy_with_logits
    sigmoid_cross_entropy
    binary_cross_entropy
    mean_squared_error
@@ -28,12 +28,11 @@ we can. So we encourage you to use TensorFlow's function, , see `TensorFlow API
    maxnorm_regularizer
    maxnorm_o_regularizer
    maxnorm_i_regularizer
-   huber_loss


 Softmax cross entropy
 ----------------------
-.. autofunction:: cross_entropy
+.. autofunction:: softmax_cross_entropy_with_logits

 Sigmoid cross entropy
 ----------------------
@@ -94,7 +93,3 @@ Special
 .. autofunction:: lo_regularizer
 .. autofunction:: maxnorm_o_regularizer
 .. autofunction:: maxnorm_i_regularizer
-
-Huber Loss
-^^^^^^^^^^
-.. autofunction:: huber_loss
\ No newline at end of file
diff --git a/docs/user/get_start_model.rst b/docs/user/get_start_model.rst
index e5a1cf749..d900f6836 100644
--- a/docs/user/get_start_model.rst
+++ b/docs/user/get_start_model.rst
@@ -120,7 +120,7 @@ Switching train/test modes
     ...     # testing code here

     # method 2: Using packaged training modules
-    model = tl.models.Model(network=MLP, loss_fn=tl.cost.cross_entropy, optimizer=optimizer)
+    model = tl.models.Model(network=MLP, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer)
     model.train(n_epoch=n_epoch, train_dataset=train_ds)

 Reuse weights
@@ -192,13 +192,13 @@ Save weights only
     MLP.save_weights('./model_weights.npz')  # by default, file will be in hdf5 format
     MLP.load_weights('./model_weights.npz')

-Save model architecture and weights (optional)
+Save model weights (optional)
 -----------------------------------------------

 .. code-block:: python

     # When using packaged training modules. Saving and loading the model can be done as follows
-    model = tl.models.Model(network=MLP, loss_fn=tl.cost.cross_entropy, optimizer=optimizer)
+    model = tl.models.Model(network=MLP, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer)
     model.train(n_epoch=n_epoch, train_dataset=train_ds)
     model.save_weights('./model.npz', format='npz_dict')
     model.load_weights('./model.npz', format='npz_dict')
diff --git a/tensorlayer/backend/ops/load_backend.py b/tensorlayer/backend/ops/load_backend.py
index 4f4d01a25..5b5be5599 100644
--- a/tensorlayer/backend/ops/load_backend.py
+++ b/tensorlayer/backend/ops/load_backend.py
@@ -7,6 +7,7 @@

 BACKEND = 'tensorflow'
 # BACKEND = 'mindspore'
+# BACKEND = 'paddle'

 # Check for backend.json files
 tl_backend_dir = os.path.expanduser('~')
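The commented assignments above are only in-file defaults; in practice the backend is selected per process through the `TL_BACKEND` environment variable, as every tutorial in this series does. A minimal sketch (TensorFlow remains the default when the variable is unset):

.. code-block:: python

    import os
    os.environ['TL_BACKEND'] = 'paddle'  # or 'tensorflow' / 'mindspore'

    import tensorlayer as tl             # must be imported after the variable is set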
diff --git a/tensorlayer/backend/ops/paddle_nn.py b/tensorlayer/backend/ops/paddle_nn.py
index c662e400e..dc4a4010e 100644
--- a/tensorlayer/backend/ops/paddle_nn.py
+++ b/tensorlayer/backend/ops/paddle_nn.py
@@ -431,16 +431,22 @@ class Conv2D(object):

     def __init__(self, strides, padding, data_format='NHWC', dilations=None, out_channel=None, k_size=None):
         self.data_format, self.padding = preprocess_2d_format(data_format, padding)
-        self.ksize = k_size[0]
         if self.data_format == 'NHWC':
-            self.dg_stride = strides[1]
-            self.dg_dilation = dilations[1]
+            self._stride = (strides[1], strides[2])
+            self._dilation = (dilations[1], dilations[2])
         elif self.data_format == 'NCHW':
-            self.dg_stride = strides[2]
-            self.dg_dilation = dilations[2]
+            self._stride = (strides[2], strides[3])
+            self._dilation = (dilations[2], dilations[3])

     def __call__(self, inputs, filters):
-        raise NotImplementedError
+        outputs = F.conv2d(x=inputs,
+                           weight=filters,
+                           stride=self._stride,
+                           dilation=self._dilation,
+                           padding=self.padding,
+                           data_format=self.data_format)
+        return outputs


 def conv2d(input, filters, strides, padding, data_format='NCHW', dilations=None):
@@ -468,7 +474,20 @@ def conv2d(input, filters, strides, padding, data_format='NCHW', dilations=None)
     -------
         A Tensor. Has the same type as input.
     """
-    raise NotImplementedError
+    data_format, padding = preprocess_2d_format(data_format, padding)
+    if data_format == 'NHWC':
+        _stride = (strides[1], strides[2])
+        _dilation = (dilations[1], dilations[2])
+    elif data_format == 'NCHW':
+        _stride = (strides[2], strides[3])
+        _dilation = (dilations[2], dilations[3])
+    outputs = F.conv2d(x=input,
+                       weight=filters,
+                       stride=_stride,
+                       dilation=_dilation,
+                       padding=padding,
+                       data_format=data_format)
+    return outputs


 class Conv3D(object):
@@ -577,10 +596,18 @@ class MaxPool(object):

     def __init__(self, ksize, strides, padding, data_format=None):
         self.data_format, self.padding = preprocess_2d_format(data_format, padding)
         self.ksize = ksize
-        self.strides = strides
+        if self.data_format == 'NHWC':
+            self._stride = (strides[1], strides[2])
+        elif self.data_format == 'NCHW':
+            self._stride = (strides[2], strides[3])

     def __call__(self, inputs):
-        raise NotImplementedError
+        outputs = F.max_pool2d(x=inputs,
+                               kernel_size=self.ksize,
+                               stride=self._stride,
+                               padding=self.padding,
+                               data_format=self.data_format)
+        return outputs


 def max_pool(input, ksize, strides, padding, data_format=None):
@@ -951,7 +978,7 @@ def __init__(self):
         pass

     def __call__(self, *args, **kwargs):
-        raise NotImplementedError
+        pd.nn.BatchNorm2D


 class GroupConv2D(object):
diff --git a/tensorlayer/initializers/__init__.py b/tensorlayer/initializers/__init__.py
index 908d53aa1..ef8c65fe0 100644
--- a/tensorlayer/initializers/__init__.py
+++ b/tensorlayer/initializers/__init__.py
@@ -5,7 +5,7 @@
 # 'Initializer', 'Zeros', 'Ones', 'Constant', 'RandomUniform', 'RandomNormal', 'TruncatedNormal',
 # 'deconv2d_bilinear_upsampling_initializer', 'He_Normal'
 # ]
-
+from .load_initializers_backend import Initializer
 from .load_initializers_backend import Zeros
 from .load_initializers_backend import Ones
 from .load_initializers_backend import Constant
diff --git a/tensorlayer/layers/__init__.py b/tensorlayer/layers/__init__.py
index 309d5861d..d67024381 100644
--- a/tensorlayer/layers/__init__.py
+++ b/tensorlayer/layers/__init__.py
@@ -18,7 +18,7 @@
 from .padding import *
 from .pooling import *
 from .quantize import *
-# from .recurrent import *
+from .recurrent import *
 from .scale import *
 from .shape import *
 from .spatial_transformer import *
diff --git a/tensorlayer/layers/convolution/simplified_conv.py b/tensorlayer/layers/convolution/simplified_conv.py
index a3d08f247..49fd002f2 100644
--- a/tensorlayer/layers/convolution/simplified_conv.py
+++ b/tensorlayer/layers/convolution/simplified_conv.py
@@ -188,7 +188,7 @@ class Conv2d(Module):
     --------
     With TensorLayer

-    >>> net = tl.layers.Input([8, 3, 400, 400], name='input')
+    >>> net = tl.layers.Input([8, 400, 400, 3], name='input')
     >>> conv2d = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), b_init=None, in_channels=3, name='conv2d_1')
     >>> print(conv2d)
     >>> tensor = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tl.ReLU, name='conv2d_2')(net)
@@ -630,7 +630,7 @@ class DeConv2d(Module):
     --------
     With TensorLayer

-    >>> net = tl.layers.Input([8, 3, 400, 400], name='input')
+    >>> net = tl.layers.Input([8, 400, 400, 3], name='input')
     >>> conv2d_transpose = tl.layers.DeConv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), b_init=None, in_channels=3, name='conv2d_transpose_1')
     >>> print(conv2d_transpose)
     >>> tensor = tl.layers.DeConv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tl.ReLU, name='conv2d_transpose_2')(net)
diff --git a/tensorlayer/layers/core/core_paddle.py b/tensorlayer/layers/core/core_paddle.py
index 22a6021b0..db692404b 100644
--- a/tensorlayer/layers/core/core_paddle.py
+++ b/tensorlayer/layers/core/core_paddle.py
@@ -191,6 +191,18 @@ def __call__(self, *inputs, **kwargs):
         return outputs

     def _get_weights(self, var_name, shape, init=None, trainable=True, transposed=None):
+        # TODO 2D paddle weights shape : [out_channel, in_channel, kernel_h, kernel_w]
+        # TODO 2D paddle transposed shape [in_channel, out_channel, kernel_h, kernel_w]
+        if len(shape) == 3:
+            shape = shape[::-1]
+        if len(shape) == 4:
+            if not transposed and self.data_format == 'NHWC':
+                shape = (shape[3], shape[0], shape[1], shape[2])
+            else:
+                shape = (shape[3], shape[2], shape[0], shape[1])
+        if len(shape) == 5:
+            shape = (shape[4], shape[3], shape[0], shape[1], shape[2])
+
         if var_name in ["filters", "weights"]:
             w_tmp = self.create_parameter(shape=shape, attr=init, is_bias=False)
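The shape shuffling above converts TensorLayer's kernel-first 2-D filter shape (kernel_h, kernel_w, in_channel, out_channel) into Paddle's (out_channel, in_channel, kernel_h, kernel_w) convention. A minimal check of that index reordering on the non-transposed branch (values are illustrative):

.. code-block:: python

    shape = (3, 3, 16, 32)                            # (kh, kw, in, out) as passed by TensorLayer layers
    shape = (shape[3], shape[2], shape[0], shape[1])  # same reordering as _get_weights above
    assert shape == (32, 16, 3, 3)                    # Paddle's (out, in, kh, kw) convention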
From fd6b2603e3676f0721c29b59feb7fef3182307c1 Mon Sep 17 00:00:00 2001
From: Eric Lai
Date: Mon, 5 Jul 2021 17:37:17 +0800
Subject: [PATCH 30/36] fix tensorflow core

---
 tensorlayer/layers/core/core_tensorflow.py | 64 ++++++++++-------
 tensorlayer/visualize.py                   | 80 ++++++++++++++++++----
 2 files changed, 106 insertions(+), 38 deletions(-)

diff --git a/tensorlayer/layers/core/core_tensorflow.py b/tensorlayer/layers/core/core_tensorflow.py
index 01fa79394..41158d34c 100644
--- a/tensorlayer/layers/core/core_tensorflow.py
+++ b/tensorlayer/layers/core/core_tensorflow.py
@@ -93,9 +93,9 @@ def __init__(self, name=None, act=None, *args, **kwargs):
         self._nodes_fixed = False

         # Layer weight state
-        self._all_weights = []
-        self._trainable_weights = []
-        self._nontrainable_weights = []
+        self._all_weights = None
+        self._trainable_weights = None
+        self._nontrainable_weights = None

         # layer forward state
         self._forward_state = False
@@ -333,15 +333,19 @@ def trainable_weights(self):

         """

-        self.get_weights()
-        layers = self.layers_and_names(name_prefix='')
-        for layer_name, layer in layers:
-            params = layer._params.items()
-            params_status = layer._params_status.items()
-            params_zip = zip(params, params_status)
-            for params, params_status in params_zip:
-                if params_status[1] == True:
-                    self._trainable_weights.append(params[1])
+        if self._trainable_weights is not None and len(self._trainable_weights) > 0:
+            # self._trainable_weights already extracted, so do nothing
+            pass
+        else:
+            self._trainable_weights = []
+            layers = self.layers_and_names(name_prefix='')
+            for layer_name, layer in layers:
+                params = layer._params.items()
+                params_status = layer._params_status.items()
+                params_zip = zip(params, params_status)
+                for params, params_status in params_zip:
+                    if params_status[1] == True:
+                        self._trainable_weights.append(params[1])
         return self._trainable_weights

     @property
@@ -352,14 +356,19 @@ def nontrainable_weights(self):

         """

-        layers = self.layers_and_names(name_prefix='')
-        for layer_name, layer in layers:
-            params = layer._params.items()
-            params_status = layer._params_status.items()
-            params_zip = zip(params, params_status)
-            for params, params_status in params_zip:
-                if params_status[1] == False:
-                    self._nontrainable_weights.append(params[1])
+        if self._nontrainable_weights is not None and len(self._nontrainable_weights) > 0:
+            # self._nontrainable_weights already extracted, so do nothing
+            pass
+        else:
+            self._nontrainable_weights = []
+            layers = self.layers_and_names(name_prefix='')
+            for layer_name, layer in layers:
+                params = layer._params.items()
+                params_status = layer._params_status.items()
+                params_zip = zip(params, params_status)
+                for params, params_status in params_zip:
+                    if params_status[1] == False:
+                        self._nontrainable_weights.append(params[1])
         return self._nontrainable_weights

     @property
@@ -370,11 +379,16 @@ def all_weights(self):

         """

-        layers = self.layers_and_names(name_prefix='')
-        for layer_name, layer in layers:
-            params = layer._params.items()
-            for par, val in params:
-                self._all_weights.append(val)
+        if self._all_weights is not None and len(self._all_weights) > 0:
+            # self._all_weights already extracted, so do nothing
+            pass
+        else:
+            self._all_weights = []
+            layers = self.layers_and_names(name_prefix='')
+            for layer_name, layer in layers:
+                params = layer._params.items()
+                for par, val in params:
+                    self._all_weights.append(val)
         return self._all_weights

     def get_weights(self, expand=True):
diff --git a/tensorlayer/visualize.py b/tensorlayer/visualize.py
index 72c1b184c..ad05acffe 100644
--- a/tensorlayer/visualize.py
+++ b/tensorlayer/visualize.py
@@ -5,9 +5,9 @@

 import imageio
 import numpy as np
-
 import tensorlayer as tl
 from tensorlayer.lazy_imports import LazyImport
+import colorsys, random

 cv2 = LazyImport("cv2")

@@ -16,18 +16,9 @@
 # matplotlib.use('Agg')

 __all__ = [
-    'read_image',
-    'read_images',
-    'save_image',
-    'save_images',
-    'draw_boxes_and_labels_to_image',
-    'draw_mpii_people_to_image',
-    'frame',
-    'CNN2d',
-    'images2d',
-    'tsne_embedding',
-    'draw_weights',
-    'W',
+    'read_image', 'read_images', 'save_image', 'save_images', 'draw_boxes_and_labels_to_image',
+    'draw_mpii_people_to_image', 'frame', 'CNN2d', 'images2d', 'tsne_embedding', 'draw_weights', 'W',
+    'draw_boxes_and_labels_to_image_with_json'
 ]

@@ -662,3 +653,66 @@ def draw_weights(W=None, second=10, saveable=True, shape=None, name='mnist', fig

 W = draw_weights
+
+
+def draw_boxes_and_labels_to_image_with_json(image, json_result, class_list, save_name=None):
+    """Draw bboxes and class labels on image. Return the image with bboxes.
+
+    Parameters
+    -----------
+    image : numpy.array
+        The RGB image [height, width, channel].
+    json_result : list of dict
+        The object detection result in JSON format.
+    class_list : list of str
+        For converting ID to string on image.
+    save_name : None or str
+        The name of the image file (e.g. image.png); if None, the image is not saved.
+
+    Returns
+    -------
+    numpy.array
+        The saved image.
+
+    References
+    -----------
+    - OpenCV rectangle and putText.
+    - `scikit-image `__.
+
+    """
+    image_h, image_w, _ = image.shape
+    num_classes = len(class_list)
+    hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
+    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
+    colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
+    random.seed(0)
+    random.shuffle(colors)
+    random.seed(None)
+    bbox_thick = int(0.6 * (image_h + image_w) / 600)
+    fontScale = 0.5
+
+    for bbox_info in json_result:
+        image_name = bbox_info['image']
+        category_id = bbox_info['category_id']
+        if category_id < 0 or category_id >= num_classes: continue
+        bbox = bbox_info['bbox']  # the order of coordinates is [x1, y1, x2, y2]
+        score = bbox_info['score']
+
+        bbox_color = colors[category_id]
+        c1, c2 = (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3]))
+        cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)
+
+        bbox_mess = '%s: %.2f' % (class_list[category_id], score)
+        t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0]
+        c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3)
+        cv2.rectangle(image, c1, (np.float32(c3[0]), np.float32(c3[1])), bbox_color, -1)
+
+        cv2.putText(
+            image, bbox_mess, (c1[0], np.float32(c1[1] - 2)), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (0, 0, 0),
+            bbox_thick // 2, lineType=cv2.LINE_AA
+        )
+
+    if save_name is not None:
+        save_image(image, save_name)
+
+    return image
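A minimal usage sketch for the new helper, assuming OpenCV is available (the module lazy-imports `cv2`) and a detector that emits one dict per box in the format read above; the file names and values are illustrative:

.. code-block:: python

    import numpy as np
    import tensorlayer as tl

    image = np.zeros((416, 416, 3), dtype=np.uint8)  # placeholder frame
    json_result = [
        {'image': 'frame0.png', 'category_id': 0, 'bbox': [40, 60, 200, 220], 'score': 0.91},
    ]
    tl.visualize.draw_boxes_and_labels_to_image_with_json(
        image, json_result, class_list=['person'], save_name='frame0_boxes.png'
    )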
From 339fb039e063f15a0e559543a277f2003ff3d56e Mon Sep 17 00:00:00 2001
From: Eric Lai
Date: Tue, 6 Jul 2021 16:39:13 +0800
Subject: [PATCH 31/36] add LayerList

---
 .../initializers/paddle_initializers.py    |  48 +++++-
 tensorlayer/layers/core/__init__.py        |   2 -
 tensorlayer/layers/core/core_tensorflow.py | 155 +++++++++++++++---
 tensorlayer/layers/deprecated.py           |   7 -
 4 files changed, 183 insertions(+), 29 deletions(-)

diff --git a/tensorlayer/initializers/paddle_initializers.py b/tensorlayer/initializers/paddle_initializers.py
index 18e69ed9e..d332be15a 100644
--- a/tensorlayer/initializers/paddle_initializers.py
+++ b/tensorlayer/initializers/paddle_initializers.py
@@ -9,10 +9,56 @@
 import paddle

 __all__ = [
-    'Zeros', 'Ones', 'Constant', 'RandomUniform', 'RandomNormal', 'TruncatedNormal',
+    'Initializer', 'Zeros', 'Ones', 'Constant', 'RandomUniform', 'RandomNormal', 'TruncatedNormal',
     'deconv2d_bilinear_upsampling_initializer', 'HeNormal'
 ]

+class Initializer(object):
+    """Initializer base class: all initializers inherit from this class.
+    """
+
+    def __call__(self, shape, dtype=None):
+        """Returns a tensor object initialized as specified by the initializer.
+
+        Parameters
+        ----------
+        shape : tuple of int.
+            The shape of the tensor.
+        dtype : Optional dtype of the tensor.
+            If not provided, a tensor of `tl.float32` is returned.
+
+        Returns
+        -------
+
+        """
+        raise NotImplementedError
+
+    def get_config(self):
+        """Returns the configuration of the initializer as a JSON-serializable dict.
+
+        Returns
+        -------
+        A JSON-serializable Python dict.
+        """
+        return {}
+
+    @classmethod
+    def from_config(cls, config):
+        """Instantiates an initializer from a configuration dictionary.
+
+        Parameters
+        ----------
+        config : A python dictionary.
+            It will typically be the output of `get_config`.
+
+        Returns
+        -------
+        An Initializer instance.
+        """
+        if 'dtype' in config:
+            config.pop('dtype')
+        return cls(**config)
+
 class Zeros(ConstantInitializer):
     """Initializer that generates tensors initialized to 0.
diff --git a/tensorlayer/layers/core/__init__.py b/tensorlayer/layers/core/__init__.py
index a3b3f95c7..d9d96891e 100644
--- a/tensorlayer/layers/core/__init__.py
+++ b/tensorlayer/layers/core/__init__.py
@@ -9,7 +9,5 @@
     from .core_tensorflow import *
 elif BACKEND == 'paddle':
     from .core_paddle import *
-elif BACKEND == 'dragon':
-    from .core_dragon import *
 else:
     raise ("Unsupported backend:", BACKEND)
diff --git a/tensorlayer/layers/core/core_tensorflow.py b/tensorlayer/layers/core/core_tensorflow.py
index 41158d34c..f6a1dec5a 100644
--- a/tensorlayer/layers/core/core_tensorflow.py
+++ b/tensorlayer/layers/core/core_tensorflow.py
@@ -9,7 +9,7 @@
 from tensorlayer.layers.utils import (get_variable_with_initializer)
 from tensorlayer import logging

-__all__ = ['Module', 'SequentialLayer']
+__all__ = ['Module', 'SequentialLayer', 'LayerList']

 _global_layer_name_dict = {}
 Parameter_ = tf.Variable
@@ -606,19 +606,19 @@ def __init__(self, *args):
     def __getitem__(self, index):
         if isinstance(index, slice):
             return self.__class__(OrderedDict(list(self._layers.items())[index]))
-        index = self._valid_index(len(self), index)
+        index = _valid_index(len(self), index)
         return list(self._layers.values())[index]

     def __setitem__(self, index, layer):
-        if self._valid_module(layer):
-            index = self._valid_index(len(self), index)
+        if _valid_module(layer):
+            index = _valid_index(len(self), index)
             key = list(self._layers.keys())[index]
             self._layers[key] = layer
             self.layer_list = list(self._layers.values())

     def __delitem__(self, index):
         if isinstance(index, int):
-            index = self._valid_index(len(self), index)
+            index = _valid_index(len(self), index)
             key = list(self._layers.keys())[index]
             del self._layers[key]
         elif isinstance(index, slice):
@@ -633,7 +633,7 @@ def __len__(self):
         return len(self._layers)

     def append(self, layer):
-        if self._valid_module(layer):
+        if _valid_module(layer):
             self._layers[str(len(self))] = layer
             self.layer_list = list(self._layers.values())
         return self
@@ -646,16 +646,133 @@ def forward(self, input_data):
             input_data = layer(input_data)
         return input_data

-    def _valid_index(self, layer_num, index):
-        if not isinstance(index, int):
-            raise TypeError("Index {} is not int type")
-        if not -layer_num <= index < layer_num:
-            raise IndexError(
-                "Index should be a number in range [{}, {}), but got {}".format(-layer_num, layer_num, index)
-            )
-        return index % layer_num
-
-    def _valid_module(self, layer):
-        if issubclass(layer.__class__, Module):
-            return True
-        raise TypeError('Module {} is not subclass of Module'.format(layer))
+
+class LayerList(Module):
+    """
+    Holds Modules in a list.
+
+    LayerList can be used like a regular Python list and supports
+    '__getitem__', '__setitem__', '__delitem__', '__len__', '__iter__' and '__iadd__',
+    but the modules it contains are properly registered and will be visible to all Module methods.
+
+    Parameters
+    ----------
+    args : list
+        List of subclass of Module.
+
+    Methods
+    ---------
+    __init__()
+        Initializing the Layer.
+    insert()
+        Inserts a given layer before a given index in the list.
+    extend()
+        Appends layers from a Python iterable to the end of the list.
+    append()
+        Appends a given layer to the end of the list.
+
+    Examples
+    ---------
+    >>> d1 = Dense(n_units=800, act=tl.ReLU, in_channels=784, name='Dense1')
+    >>> d2 = Dense(n_units=800, act=tl.ReLU, in_channels=800, name='Dense2')
+    >>> layer_list = LayerList([d1, d2])
+
+    """
+    def __init__(self, *args, **kwargs):
+        super(LayerList, self).__init__()
+        if len(args) == 1:
+            self.extend(args[0])
+
+    def __getitem__(self, index):
+        if isinstance(index, slice):
+            return self.__class__(list(self._layers.values())[index])
+        if isinstance(index, int):
+            index = _valid_index(len(self), index)
+            return self._layers[str(index)]
+        raise TypeError('Index {} is not int type or slice type'.format(index))
+
+    def __setitem__(self, index, layer):
+        if not isinstance(index, int):
+            raise TypeError('Index {} is not int type'.format(index))
+        _valid_module(layer)
+        index = _valid_index(len(self), index)
+        self._layers[str(index)] = layer
+
+    def __delitem__(self, index):
+        if isinstance(index, int):
+            index = _valid_index(len(self), index)
+            del self._layers[str(index)]
+        elif isinstance(index, slice):
+            keys = list(self._layers.keys())[index]
+            for key in keys:
+                del self._layers[key]
+        else:
+            raise TypeError('Index {} is not int type or slice type'.format(index))
+        temp_dict = OrderedDict()
+        for idx, layer in enumerate(self._layers.values()):
+            temp_dict[str(idx)] = layer
+        self._layers = temp_dict
+
+    def __len__(self):
+        return len(self._layers)
+
+    def __iter__(self):
+        return iter(self._layers.values())
+
+    def __iadd__(self, layers):
+        self.extend(layers)
+        return self
+
+    def insert(self, index, layer):
+        """
+        Inserts a given layer before a given index in the list.
+
+        """
+
+        idx = _valid_index(len(self), index)
+        _valid_module(layer)
+        length = len(self)
+        while length > idx:
+            self._layers[str(length)] = self._layers[str(length - 1)]
+            length -= 1
+        self._layers[str(idx)] = layer
+
+    def extend(self, layers):
+        """
+        Appends layers from a Python iterable to the end of the list.
+
+        """
+
+        if not isinstance(layers, list):
+            raise TypeError('Modules {} should be a list of sublayers'.format(layers))
+        for layer in layers:
+            if _valid_module(layer):
+                self._layers[str(len(self))] = layer
+        return self
+
+    def append(self, layer):
+        """
+        Appends a given layer to the end of the list.
+
+        """
+
+        if _valid_module(layer):
+            self._layers[str(len(self))] = layer
+
+    def forward(self, *inputs):
+        raise NotImplementedError
+
+
+def _valid_index(layer_num, index):
+    if not isinstance(index, int):
+        raise TypeError("Index {} is not int type".format(index))
+    if not -layer_num <= index < layer_num:
+        raise IndexError(
+            "Index should be a number in range [{}, {}), but got {}".format(-layer_num, layer_num, index)
+        )
+    return index % layer_num
+
+
+def _valid_module(layer):
+    if issubclass(layer.__class__, Module):
+        return True
+    raise TypeError('Module {} is not subclass of Module'.format(layer))
\ No newline at end of file
diff --git a/tensorlayer/layers/deprecated.py b/tensorlayer/layers/deprecated.py
index 6b1cfe8ca..cc44742d1 100644
--- a/tensorlayer/layers/deprecated.py
+++ b/tensorlayer/layers/deprecated.py
@@ -416,13 +416,6 @@ def TimeDistributedLayer(*args, **kwargs):
     raise NonExistingLayerError("TimeDistributedLayer is removed for TF 2.0, please use eager mode instead." + __log__)


-__all__ += ['LayerList']
-
-
-def LayerList(*args, **kwargs):
-    raise NonExistingLayerError("LayerList(list)(input_data) --> SequentialLayer(list)(input_data)" + __log__)
-
-
 __all__ += ['ModelLayer']

From 081a2b85a103765f42b5487eb25a01e8c6af6c35 Mon Sep 17 00:00:00 2001
From: Eric Lai
Date: Tue, 6 Jul 2021 17:16:31 +0800
Subject: [PATCH 32/36] add tutorial LayerList

---
 .../basic_tutorials/tutorial_LayerList.py  | 34 +++++++++++++++++++
 tensorlayer/layers/core/core_tensorflow.py |  5 ++-
 2 files changed, 36 insertions(+), 3 deletions(-)
 create mode 100644 examples/basic_tutorials/tutorial_LayerList.py

diff --git a/examples/basic_tutorials/tutorial_LayerList.py b/examples/basic_tutorials/tutorial_LayerList.py
new file mode 100644
index 000000000..23d480fc7
--- /dev/null
+++ b/examples/basic_tutorials/tutorial_LayerList.py
@@ -0,0 +1,34 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+from tensorlayer.layers import Module, LayerList, Dense
+import tensorlayer as tl
+
+d1 = Dense(n_units=800, act=tl.ReLU, in_channels=784, name='Dense1')
+d2 = Dense(n_units=800, act=tl.ReLU, in_channels=800, name='Dense2')
+d3 = Dense(n_units=10, act=tl.ReLU, in_channels=800, name='Dense3')
+
+layer_list = LayerList([d1, d2])
+# Inserts a given d2 before a given index in the list
+layer_list.insert(1, d2)
+layer_list.insert(2, d2)
+# Appends d2 from a Python iterable to the end of the list.
+layer_list.extend([d2])
+# Appends a given d3 to the end of the list.
+layer_list.append(d3)
+
+print(layer_list)
+
+class model(Module):
+    def __init__(self):
+        super(model, self).__init__()
+        self._list = layer_list
+    def forward(self, inputs):
+        output = self._list[0](inputs)
+        for i in range(1, len(self._list)):
+            output = self._list[i](output)
+        return output
+
+net = model()
+print(net)
+print(net(tl.layers.Input((10, 784))))
\ No newline at end of file
diff --git a/tensorlayer/layers/core/core_tensorflow.py b/tensorlayer/layers/core/core_tensorflow.py
index f6a1dec5a..46bdc5fc1 100644
--- a/tensorlayer/layers/core/core_tensorflow.py
+++ b/tensorlayer/layers/core/core_tensorflow.py
@@ -678,10 +678,9 @@ class LayerList(Module):
     >>> layer_list = LayerList([d1, d2])

     """
-    def __init__(self, *args, **kwargs):
+    def __init__(self, args):
         super(LayerList, self).__init__()
-        if len(args) == 1:
-            self.extend(args[0])
+        self.extend(args)

From 093b1df5ba0888b54dd40ed9a8b18127fecc8753 Mon Sep 17 00:00:00 2001
From: Eric Lai
Date: Mon, 12 Jul 2021 09:44:14 +0800
Subject: [PATCH 33/36] update paddle core, add paddle tutorial

---
 .../tutorial_cifar10_cnn_paddle_backend.py    | 164 ++++++++++++++++++
 .../basic_tutorials/tutorial_mnist_simple.py  |   5 +-
 tensorlayer/backend/ops/paddle_nn.py          |  59 ++++++-
 .../layers/convolution/super_resolution.py    |   4 +-
 tensorlayer/layers/core/core_paddle.py        |  28 ++-
 5 files changed, 239 insertions(+), 21 deletions(-)
 create mode 100644 examples/basic_tutorials/tutorial_cifar10_cnn_paddle_backend.py

diff --git a/examples/basic_tutorials/tutorial_cifar10_cnn_paddle_backend.py b/examples/basic_tutorials/tutorial_cifar10_cnn_paddle_backend.py
new file mode 100644
index 000000000..133780bc3
--- /dev/null
+++ b/examples/basic_tutorials/tutorial_cifar10_cnn_paddle_backend.py
@@ -0,0 +1,164 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+# The tensorlayer and tensorflow operators can be mixed
+import os
+os.environ['TL_BACKEND'] = 'paddle'
+
+import time
+import numpy as np
+import multiprocessing
+import tensorflow as tf
+import paddle as pd
+from tensorlayer.layers import Module
+import tensorlayer as tl
+from tensorlayer.layers import (Conv2d, Dense, Flatten, MaxPool2d, BatchNorm2d)
+
+# enable debug logging
+tl.logging.set_verbosity(tl.logging.DEBUG)
+
+# prepare cifar10 data
+X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
+
+
+class CNN(Module):
+
+    def __init__(self):
+        super(CNN, self).__init__()
+        # weights init
+        W_init = tl.initializers.truncated_normal(stddev=5e-2)
+        W_init2 = tl.initializers.truncated_normal(stddev=0.04)
+        b_init2 = tl.initializers.constant(value=0.1)
+
+        self.conv1 = Conv2d(64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv1', in_channels=3)
+        self.bn1 = BatchNorm2d(num_features=64, act=tl.ReLU)
+        self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')
+
+        self.conv2 = Conv2d(64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv2', in_channels=64)
+        self.bn2 = BatchNorm2d(num_features=64, act=tl.ReLU)
+        self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')
+
+        self.flatten = Flatten(name='flatten')
+        self.dense1 = Dense(384, act=tl.ReLU, W_init=W_init2, b_init=b_init2, name='dense1relu', in_channels=2304)
+        self.dense2 = Dense(192, act=tl.ReLU, W_init=W_init2, b_init=b_init2, name='dense2relu', in_channels=384)
+        self.dense3 = Dense(10, act=None, W_init=W_init2, name='output', in_channels=192)
+
+    def forward(self, x):
+        z = self.conv1(x)
+        z = self.bn1(z)
+        z = self.maxpool1(z)
+        z = self.conv2(z)
+        z = self.bn2(z)
+        z = self.maxpool2(z)
+        z = self.flatten(z)
+        z = self.dense1(z)
+        z = self.dense2(z)
+        z = self.dense3(z)
+        return z
+
+
+def generator_train():
+    inputs = X_train
+    targets = y_train
+    if len(inputs) != len(targets):
+        raise AssertionError("The length of inputs and targets should be equal")
+    for _input, _target in zip(inputs, targets):
+        # yield _input.encode('utf-8'), _target.encode('utf-8')
+        yield _input, _target
+
+
+def generator_test():
+    inputs = X_test
+    targets = y_test
+    if len(inputs) != len(targets):
+        raise AssertionError("The length of inputs and targets should be equal")
+    for _input, _target in zip(inputs, targets):
+        # yield _input.encode('utf-8'), _target.encode('utf-8')
+        yield _input, _target
+
+
+def _map_fn_train(img, target):
+    # 1. Randomly crop a [height, width] section of the image.
+    img = tf.image.random_crop(img, [24, 24, 3])
+    # 2. Randomly flip the image horizontally.
+    img = tf.image.random_flip_left_right(img)
+    # 3. Randomly change brightness.
+    img = tf.image.random_brightness(img, max_delta=63)
+    # 4. Randomly change contrast.
+    img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
+    # 5. Subtract off the mean and divide by the variance of the pixels.
+    img = tf.image.per_image_standardization(img)
+    target = tf.reshape(target, ())
+    return img, target
+
+
+def _map_fn_test(img, target):
+    # 1. Crop the central [height, width] of the image.
+    img = tf.image.resize_with_pad(img, 24, 24)
+    # 2. Subtract off the mean and divide by the variance of the pixels.
+    img = tf.image.per_image_standardization(img)
+    img = tf.reshape(img, (24, 24, 3))
+    target = tf.reshape(target, ())
+    return img, target
+
+
+# get the network
+net = CNN()
+
+# training settings
+batch_size = 128
+n_epoch = 500
+learning_rate = 0.0001
+print_freq = 5
+shuffle_buffer_size = 128
+metrics = tl.metric.Accuracy()
+
+train_weights = net.trainable_weights
+optimizer = tl.optimizers.Adam(learning_rate)
+# looking for decay learning rate? see https://github.com/tensorlayer/srgan/blob/master/train.py
+
+# dataset API and augmentation
+train_ds = tf.data.Dataset.from_generator(
+    generator_train, output_types=(tf.float32, tf.int32)
+)  # , output_shapes=((24, 24, 3), (1)))
+train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
+# train_ds = train_ds.repeat(n_epoch)
+train_ds = train_ds.shuffle(shuffle_buffer_size)
+train_ds = train_ds.prefetch(buffer_size=4096)
+train_ds = train_ds.batch(batch_size)
+# value = train_ds.make_one_shot_iterator().get_next()
+
+test_ds = tf.data.Dataset.from_generator(
+    generator_test, output_types=(tf.float32, tf.int32)
+)  # , output_shapes=((24, 24, 3), (1)))
+# test_ds = test_ds.shuffle(shuffle_buffer_size)
+test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count())
+# test_ds = test_ds.repeat(n_epoch)
+test_ds = test_ds.prefetch(buffer_size=4096)
+test_ds = test_ds.batch(batch_size)
+# value_test = test_ds.make_one_shot_iterator().get_next()
+
+for epoch in range(n_epoch):
+    start_time = time.time()
+    train_loss, train_acc, n_iter = 0, 0, 0
+    for X_batch, y_batch in train_ds:
+        X_batch = pd.to_tensor(X_batch.numpy(), dtype=tl.float32)
+        y_batch = pd.to_tensor(y_batch.numpy(), dtype=tl.int64)
+        net.set_train()
+
+        output = net(X_batch)
+        loss = pd.nn.functional.cross_entropy(output, y_batch)
+        loss_ce = loss.numpy()
+        params_grads = optimizer.gradient(loss, train_weights)
+        optimizer.apply_gradients(params_grads)
+
+        train_loss += loss_ce
+
+        if metrics:
+            metrics.update(output, y_batch)
+            train_acc += metrics.result()
+            metrics.reset()
+        n_iter += 1
+
+    print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
+    print("   train loss: {}".format(train_loss / n_iter))
+    print("   train acc:  {}".format(train_acc / n_iter))
print_freq=print_freq, print_train_batch=False) model.save_weights('./model.npz', format='npz_dict') model.load_weights('./model.npz', format='npz_dict') \ No newline at end of file diff --git a/tensorlayer/backend/ops/paddle_nn.py b/tensorlayer/backend/ops/paddle_nn.py index dc4a4010e..798d2a76f 100644 --- a/tensorlayer/backend/ops/paddle_nn.py +++ b/tensorlayer/backend/ops/paddle_nn.py @@ -46,10 +46,10 @@ def preprocess_1d_format(data_format, padding): str "NWC" or "NCW" and "SAME" or "VALID" """ - if data_format in ["channels_last", "NWC"]: - data_format = "NWC" - elif data_format in ["channels_first", "NCW"]: - data_format = "NCW" + if data_format in ["channels_last", "NWC", "NLC"]: + data_format = "NLC" + elif data_format in ["channels_first", "NCW", "NCL"]: + data_format = "NCL" elif data_format == None: data_format = None else: @@ -974,11 +974,54 @@ def conv3d_transpose( class BatchNorm(object): - def __init__(self): - pass + def __init__(self, decay=0.9, epsilon=0.00001, beta=None, gamma=None, moving_mean=None, moving_var=None, num_features=None, + data_format='channels_last', is_train=False): + self.decay = decay + self.epsilon = epsilon + self.data_format = data_format + self.beta = beta + self.gamma = gamma + self.moving_mean = moving_mean + self.moving_var = moving_var + self.num_features = num_features + self.is_train = is_train + self.axes = None + + + def __call__(self, inputs): + data_format = self.channel_format(inputs) + outputs = pd.nn.functional.batch_norm( + inputs, + self.moving_mean, + self.moving_var, + weight=self.gamma, + bias=self.beta, + training=self.is_train, + momentum=self.decay, + epsilon=self.epsilon, + data_format=data_format + ) + return outputs - def __call__(self, *args, **kwargs): - pd.nn.BatchNorm2D + def channel_format(self, inputs): + """ return "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". 
""" + len_in_shape = len(inputs.shape) + if len_in_shape == 2: + return 'NC' + if self.data_format == 'channels_last': + if len_in_shape == 3: + return 'NLC' + if len_in_shape == 4: + return 'NHWC' + if len_in_shape == 5: + return 'NDHWC' + if self.data_format == 'channels_first': + if len_in_shape == 3: + return 'NCL' + if len_in_shape == 4: + return 'NCHW' + if len_in_shape == 5: + return 'NCDHW' class GroupConv2D(object): diff --git a/tensorlayer/layers/convolution/super_resolution.py b/tensorlayer/layers/convolution/super_resolution.py index 102ef52e2..fe66ea542 100644 --- a/tensorlayer/layers/convolution/super_resolution.py +++ b/tensorlayer/layers/convolution/super_resolution.py @@ -163,11 +163,11 @@ def __init__( self._built = True logging.info( "SubpixelConv2d %s: scale: %d act: %s" % - (self.name, scale, self.act.__name__ if self.act is not None else 'No Activation') + (self.name, scale, self.act.__class__.__name__ if self.act is not None else 'No Activation') ) def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' + actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' s = ('{classname}(in_channels={in_channels}, out_channels={n_out_channels}') s += (', ' + actstr) if self.name is not None: diff --git a/tensorlayer/layers/core/core_paddle.py b/tensorlayer/layers/core/core_paddle.py index db692404b..ba0c855cb 100644 --- a/tensorlayer/layers/core/core_paddle.py +++ b/tensorlayer/layers/core/core_paddle.py @@ -9,6 +9,7 @@ from paddle.fluid.framework import in_dygraph_mode from paddle.fluid.dygraph.base import program_desc_tracing_guard, param_guard from paddle.fluid.dygraph import parallel_helper +import paddle as pd _global_layer_name_dict = {} @@ -196,25 +197,34 @@ def _get_weights(self, var_name, shape, init=None, trainable=True, transposed=No if len(shape) == 3: shape = shape[::-1] if len(shape) == 4: - if not transposed and self.data_format == 'NHWC': + if transposed: shape = (shape[3], shape[0], shape[1], shape[2]) else: shape = (shape[3], shape[2], shape[0], shape[1]) if len(shape) == 5: shape = (shape[4], shape[3], shape[0], shape[1], shape[2]) - if var_name in ["filters", "weights"]: - w_tmp = self.create_parameter(shape=shape, attr=init, is_bias=False) - elif var_name in ["biases"]: - w_tmp = self.create_parameter(shape=shape, attr=init, is_bias=True) - else: - w_tmp = self.create_parameter(shape=shape, attr=init) + # if var_name in ["filters", "weights"]: + # var_name = self.name + "/" + var_name + # w_tmp = self.create_parameter(shape=shape, attr=init, is_bias=False, trainable=trainable, var_name=var_name) + # elif var_name in ["biases"]: + # var_name = self.name + "/" + var_name + # w_tmp = self.create_parameter(shape=shape, attr=init, is_bias=True, trainable=trainable, var_name=var_name) + # else: + var_name = self.name + "/" + var_name + w_tmp = self.create_parameter(shape=shape, attr=init, var_name=var_name, trainable=trainable) self.trainable = trainable + return w_tmp - def create_parameter(self, shape, attr=None, dtype=None, is_bias=False, default_initializer=None): + def create_parameter(self, shape, attr=None, dtype=None, is_bias=False, default_initializer=None, trainable=True, var_name=None): """Create parameters for this layer.""" - temp_attr = copy.deepcopy(attr) + init_attr = pd.ParamAttr( + name=var_name, + initializer=attr, + trainable=trainable, + do_model_average=True) + temp_attr = copy.deepcopy(init_attr) if isinstance(temp_attr, six.string_types) and temp_attr == "": temp_attr = None 
return self._helper.create_parameter(temp_attr, shape, dtype, is_bias, default_initializer) From 608b722659f851bf6708428408faae182ea82ab0 Mon Sep 17 00:00:00 2001 From: hanjr Date: Mon, 12 Jul 2021 15:30:58 +0800 Subject: [PATCH 34/36] update vision --- tensorlayer/__init__.py | 1 + tensorlayer/layers/convolution/super_resolution.py | 4 ++-- tensorlayer/vision/tensorflow_vision.py | 10 ++++++---- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/tensorlayer/__init__.py b/tensorlayer/__init__.py index 442dce1f5..be46822de 100644 --- a/tensorlayer/__init__.py +++ b/tensorlayer/__init__.py @@ -51,6 +51,7 @@ from tensorlayer import utils from tensorlayer import dataflow from tensorlayer import metric + from tensorlayer import vision from tensorlayer.lazy_imports import LazyImport diff --git a/tensorlayer/layers/convolution/super_resolution.py b/tensorlayer/layers/convolution/super_resolution.py index fe66ea542..0b9339fe6 100644 --- a/tensorlayer/layers/convolution/super_resolution.py +++ b/tensorlayer/layers/convolution/super_resolution.py @@ -61,11 +61,11 @@ def __init__( logging.info( "SubpixelConv1d %s: scale: %d act: %s" % - (self.name, scale, self.act.__name__ if self.act is not None else 'No Activation') + (self.name, scale, self.act.__class__.__name__ if self.act is not None else 'No Activation') ) def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' + actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' s = ('{classname}(in_channels={in_channels}, out_channels={out_channels}') s += (', ' + actstr) if self.name is not None: diff --git a/tensorlayer/vision/tensorflow_vision.py b/tensorlayer/vision/tensorflow_vision.py index 95609a23a..cc9595ce3 100644 --- a/tensorlayer/vision/tensorflow_vision.py +++ b/tensorlayer/vision/tensorflow_vision.py @@ -642,7 +642,8 @@ def normalize(image, mean, std, data_format): A Tensor with the same shape and dtype as image. ------- ''' - image = ops.convert_to_tensor(image, dtype=tf.float32) + image = ops.convert_to_tensor(image, name='image') + image = math_ops.cast(image, dtype=tf.float32) image = _AssertAtLeast3DImage(image) if data_format == 'CHW': @@ -668,13 +669,14 @@ def normalize(image, mean, std, data_format): mean = np.float32(np.array(mean).reshape((1, 1, -1))) std = np.float32(np.array(std).reshape((1, 1, -1))) - mean = ops.convert_to_tensor(mean, dtype=image.dtype) - std = ops.convert_to_tensor(std, dtype=image.dtype) + mean = ops.convert_to_tensor(mean) + mean = math_ops.cast(mean, dtype=tf.float32) + std = ops.convert_to_tensor(std) + std = math_ops.cast(std, dtype=tf.float32) image -= mean image = math_ops.divide(image, std) return image - def standardize(image): ''' Reference to tf.image.per_image_standardization(). 
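A note on the cast change in the vision diff above: `convert_to_tensor(image, dtype=tf.float32)` fails when handed a tensor that already has another dtype (a `uint8` image, for instance), while the explicit `math_ops.cast` accepts any input dtype. A minimal usage sketch of the `normalize()` edited above; the import path follows the file this patch touches, and the ImageNet-style mean/std values are illustrative assumptions, not part of the patch:

    # Hedged sketch: assumes a tensorlayer build with this patch applied.
    import numpy as np
    from tensorlayer.vision.tensorflow_vision import normalize

    # A uint8 HWC image now passes, thanks to the explicit cast.
    img = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
    out = normalize(img, mean=[123.68, 116.78, 103.94], std=[58.40, 57.12, 57.38], data_format='HWC')
    print(out.shape, out.dtype)  # (224, 224, 3) float32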
From 96687792936d44c3803c611de315980ee6b4b8ec Mon Sep 17 00:00:00 2001
From: Eric Lai
Date: Wed, 14 Jul 2021 09:23:28 +0800
Subject: [PATCH 35/36] update backend

---
 tensorlayer/backend/ops/paddle_nn.py               | 108 +++++++++++++++---
 .../layers/convolution/simplified_conv.py          |   3 -
 2 files changed, 92 insertions(+), 19 deletions(-)

diff --git a/tensorlayer/backend/ops/paddle_nn.py b/tensorlayer/backend/ops/paddle_nn.py
index 798d2a76f..672752724 100644
--- a/tensorlayer/backend/ops/paddle_nn.py
+++ b/tensorlayer/backend/ops/paddle_nn.py
@@ -128,7 +128,15 @@ def nchw_to_nhwc(x):
         channels last tensor data
     """
-    pass
+    if len(x.shape) == 3:
+        x = pd.transpose(x, (0, 2, 1))
+    elif len(x.shape) == 4:
+        x = pd.transpose(x, (0, 2, 3, 1))
+    elif len(x.shape) == 5:
+        x = pd.transpose(x, (0, 2, 3, 4, 1))
+    else:
+        raise Exception("Unsupported dimensions")
+    return x
 
 
 def nhwc_to_nchw(x):
@@ -145,7 +153,15 @@ def nhwc_to_nchw(x):
         channels first tensor data
     """
-    pass
+    if len(x.shape) == 3:
+        x = pd.transpose(x, (0, 2, 1))
+    elif len(x.shape) == 4:
+        x = pd.transpose(x, (0, 3, 1, 2))
+    elif len(x.shape) == 5:
+        x = pd.transpose(x, (0, 4, 1, 2, 3))
+    else:
+        raise Exception("Unsupported dimensions")
+    return x
 
 
 class ReLU(object):
@@ -360,12 +376,22 @@ class BiasAdd(object):
     -------
         A Tensor with the same type as value.
     """
-
-    def __init__(self, data_format='NHWC'):
-        self.data_format = data_format
+    def __init__(self, data_format='channels_first'):
+        super(BiasAdd, self).__init__()
+        if data_format in ['channels_first', 'NCW', 'NCHW', 'NCDHW']:
+            self.data_format = 'channels_first'
+        elif data_format in ['channels_last', 'NWC', 'NHWC', 'NDHWC']:
+            self.data_format = 'channels_last'
+        else:
+            raise ValueError("Unsupported data format: " + str(data_format))
 
     def __call__(self, x, bias):
-        return pd.add(x, bias)
+        if self.data_format == 'channels_first':
+            x = nchw_to_nhwc(x)
+        outputs = pd.add(x, bias)
+        if self.data_format == 'channels_first':
+            outputs = nhwc_to_nchw(outputs)
+        return outputs
 
 
 def bias_add(x, bias):
@@ -387,12 +413,27 @@ def bias_add(x, bias):
     -------
         A Tensor with the same type as value.
     """
-    raise NotImplementedError
+    # TODO: bias_add currently only supports channels_last layouts.
+    outputs = pd.add(x, bias)
+    return outputs
 
 
 class Conv1D(object):
-    pass
-    # raise NotImplementedError
+
+    def __init__(self, stride, padding, data_format='NWC', dilations=None, out_channel=None, k_size=None):
+        super(Conv1D, self).__init__()
+        self.data_format, self.padding = preprocess_1d_format(padding=padding, data_format=data_format)
+        self.stride = stride
+        self.dilations = dilations
+
+    def __call__(self, input, filters):
+        output = F.conv1d(x=input,
+                          weight=filters,
+                          stride=self.stride,
+                          dilation=self.dilations,
+                          data_format=self.data_format,
+                          padding=self.padding)
+        return output
 
 
 def conv1d(input, filters, stride, padding, data_format='NWC', dilations=None, name=None):
@@ -424,7 +465,14 @@ def conv1d(input, filters, stride, padding, data_format='NWC', dilations=None, n
         A Tensor. Has the same type as input.
""" - pass + outputs = F.conv1d(x=input, + weight=filters, + stride=stride, + padding=padding, + data_format=data_format, + dilation=dilations, + name=name) + return outputs class Conv2D(object): @@ -491,8 +539,23 @@ def conv2d(input, filters, strides, padding, data_format='NCHW', dilations=None) class Conv3D(object): - pass - # raise NotImplementedError + def __init__(self, strides, padding, data_format='NDHWC', dilations=None, out_channel=None, k_size=None): + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + if data_format is 'NDHWC': + self._strides = (strides[1], strides[2], strides[3]) + self._dilations = (dilations[1], dilations[2], dilations[3]) + elif data_format is 'NCDHW': + self._strides = (strides[2], strides[3], strides[4]) + self._dilations = (dilations[2], dilations[3], dilations[4]) + + def __call__(self, input, filters): + outputs = F.conv3d(x=input, + weight=filters, + stride=self._strides, + dilation=self._dilations, + data_format=self.data_format, + padding=self.padding) + return outputs def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None, name=None): @@ -507,7 +570,7 @@ def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None filters : tensor Must have the same type as input. Shape [filter_depth, filter_height, filter_width, in_channels, out_channels]. in_channels must match between input and filters. - strides : list of ints + strides : tuple of ints A list of ints that has length >= 5. 1-D tensor of length 5. The stride of the sliding window for each dimension of input. Must have strides[0] = strides[4] = 1. @@ -517,7 +580,7 @@ def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. - dilations : list of ints + dilations : touple of ints Defaults to [1, 1, 1, 1, 1]. 1-D tensor of length 5. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of data_format, see above for details. @@ -529,8 +592,21 @@ def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None ------- A Tensor. Has the same type as input. 
""" - - raise NotImplementedError + data_format, padding = preprocess_3d_format(data_format, padding) + if data_format is 'NDHWC': + _strides = (strides[1], strides[2], strides[3]) + _dilations = (dilations[1], dilations[2], dilations[3]) + elif data_format is 'NCDHW': + _strides = (strides[2], strides[3], strides[4]) + _dilations = (dilations[2], dilations[3], dilations[4]) + outputs = F.conv3d(x=input, + weight=filters, + stride=_strides, + dilation=_dilations, + data_format=data_format, + padding=padding, + name=name) + return outputs def lrn(inputs, depth_radius, bias, alpha, beta): diff --git a/tensorlayer/layers/convolution/simplified_conv.py b/tensorlayer/layers/convolution/simplified_conv.py index 49fd002f2..1c8d13adb 100644 --- a/tensorlayer/layers/convolution/simplified_conv.py +++ b/tensorlayer/layers/convolution/simplified_conv.py @@ -415,9 +415,6 @@ def build(self, inputs_shape): self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) - if self.b_init: - self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) - self.b_init_flag = False if self.b_init: self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) From b2684136e39ddc5e90f24aaf5529f0fc4be937cc Mon Sep 17 00:00:00 2001 From: Eric_lai Date: Wed, 21 Jul 2021 15:36:05 +0800 Subject: [PATCH 36/36] Update backends --- tensorlayer/backend/ops/__init__.py | 6 +- tensorlayer/backend/ops/load_backend.py | 4 +- tensorlayer/backend/ops/mindspore_backend.py | 2 +- tensorlayer/backend/ops/mindspore_nn.py | 33 ++++++++++ tensorlayer/backend/ops/paddle_backend.py | 18 ++---- tensorlayer/backend/ops/paddle_nn.py | 65 ++++++++++++++++++-- tensorlayer/backend/ops/tensorflow_nn.py | 34 ++++++++++ tensorlayer/layers/pooling.py | 35 +++++------ 8 files changed, 155 insertions(+), 42 deletions(-) diff --git a/tensorlayer/backend/ops/__init__.py b/tensorlayer/backend/ops/__init__.py index 1cef00995..f5eea8684 100644 --- a/tensorlayer/backend/ops/__init__.py +++ b/tensorlayer/backend/ops/__init__.py @@ -33,8 +33,6 @@ from .load_backend import GroupConv2D from .load_backend import BinaryConv2D from .load_backend import DorefaConv2D -from .load_backend import MaxPool1d -from .load_backend import AvgPool1d from .load_backend import ReLU from .load_backend import ReLU6 @@ -47,8 +45,12 @@ from .load_backend import Conv2D from .load_backend import Conv3D from .load_backend import BiasAdd +from .load_backend import MaxPool1d from .load_backend import MaxPool +from .load_backend import MaxPool3d +from .load_backend import AvgPool1d from .load_backend import AvgPool +from .load_backend import AvgPool3d from .load_backend import Dropout from .load_backend import BatchNorm from .load_backend import DepthwiseConv2d diff --git a/tensorlayer/backend/ops/load_backend.py b/tensorlayer/backend/ops/load_backend.py index 5b5be5599..72e120db6 100644 --- a/tensorlayer/backend/ops/load_backend.py +++ b/tensorlayer/backend/ops/load_backend.py @@ -5,9 +5,9 @@ import os import sys -BACKEND = 'tensorflow' +# BACKEND = 'tensorflow' # BACKEND = 'mindspore' -# BACKEND = 'paddle' +BACKEND = 'paddle' # Check for backend.json files tl_backend_dir = os.path.expanduser('~') diff --git a/tensorlayer/backend/ops/mindspore_backend.py b/tensorlayer/backend/ops/mindspore_backend.py index d0f86c052..59187cba9 100644 --- a/tensorlayer/backend/ops/mindspore_backend.py +++ b/tensorlayer/backend/ops/mindspore_backend.py @@ -612,7 +612,7 @@ class ReduceSum(Cell): def __init__(self, axis): super(ReduceSum, 
         self.axis = axis
-        self.reduce_sum = P.ReduceSum(keep_dims=True)
+        self.reduce_sum = P.ReduceSum(keep_dims=False)
 
     def construct(self, input):
         return self.reduce_sum(input, self.axis)
diff --git a/tensorlayer/backend/ops/mindspore_nn.py b/tensorlayer/backend/ops/mindspore_nn.py
index 36e0ff3fd..1babad3bc 100644
--- a/tensorlayer/backend/ops/mindspore_nn.py
+++ b/tensorlayer/backend/ops/mindspore_nn.py
@@ -852,6 +852,25 @@ def avg_pool(input, ksize, strides, padding):
     return outputs(input)
 
 
+class MaxPool3d(Cell):
+
+    def __init__(self, ksize, strides, padding, data_format=None):
+        super(MaxPool3d, self).__init__()
+        self.data_format, self.padding = preprocess_3d_format(data_format, padding)
+        if self.data_format == 'NDHWC':
+            _strides = (strides[1], strides[2], strides[3])
+        elif self.data_format == 'NCDHW':
+            _strides = (strides[2], strides[3], strides[4])
+        self.max_pool3d = P.MaxPool3D(
+            kernel_size=ksize,
+            strides=_strides,
+            padding=self.padding,
+            data_format=self.data_format)
+
+    def __call__(self, inputs):
+        outputs = self.max_pool3d(inputs)
+        return outputs
+
+
 def max_pool3d(input, ksize, strides, padding, data_format=None, name=None):
     """
     Performs the max pooling on the input.
@@ -882,6 +901,20 @@ def max_pool3d(input, ksize, strides, padding, data_format=None, name=None):
     pass
 
 
+class AvgPool3d(Cell):
+
+    def __init__(self, ksize, strides, padding, data_format=None):
+        super(AvgPool3d, self).__init__()
+        self.data_format, self.padding = preprocess_3d_format(data_format, padding)
+        # 3-D average pooling is not wired up for the MindSpore backend yet.
+        raise NotImplementedError
+
+    def __call__(self, inputs):
+        pass
+
+
 def avg_pool3d(input, ksize, strides, padding, data_format=None, name=None):
     """
     Performs the average pooling on the input.
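Both 3-D pooling wrappers above repeat the same bookkeeping: a 5-element TensorFlow-style `strides` tuple is reduced to the three spatial strides that the MindSpore (and, below, Paddle) kernels expect. A stand-alone sketch of that convention; the helper name is hypothetical, not part of the patch:

    # Hypothetical helper mirroring the stride handling in the wrappers above.
    def spatial_strides(strides, data_format):
        if data_format == 'NDHWC':   # (batch, depth, height, width, channels)
            return strides[1:4]
        if data_format == 'NCDHW':   # (batch, channels, depth, height, width)
            return strides[2:5]
        raise ValueError("Unsupported data format: " + str(data_format))

    assert spatial_strides((1, 2, 2, 2, 1), 'NDHWC') == (2, 2, 2)
    assert spatial_strides((1, 1, 2, 2, 2), 'NCDHW') == (2, 2, 2)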
diff --git a/tensorlayer/backend/ops/paddle_backend.py b/tensorlayer/backend/ops/paddle_backend.py
index 60c81fd63..3d8e75c88 100644
--- a/tensorlayer/backend/ops/paddle_backend.py
+++ b/tensorlayer/backend/ops/paddle_backend.py
@@ -418,15 +418,10 @@ def construct(self, input):
 
 class ReduceMean(object):
 
     def __init__(self, axis):
-        if axis == [1, 2]:
-            self.data_format = 'NHWC'
-        elif axis == [2, 3]:
-            self.data_format = 'NCHW'
-        else:
-            raise ("`data_format` should have one of the following values: [`channels_last`, `channels_first`]")
+        self.axis = axis
 
     def __call__(self, inputs):
-        raise NotImplementedError
+        return pd.mean(inputs, axis=self.axis)
 
 
 def reduce_mean(input_tensor, axis=None):
@@ -454,15 +449,10 @@ def reduce_mean(input_tensor, axis=None):
 
 class ReduceMax(object):
 
     def __init__(self, axis):
-        if axis == [1, 2]:
-            self.data_format = 'NHWC'
-        elif axis == [2, 3]:
-            self.data_format = 'NCHW'
-        else:
-            raise ("`data_format` should have one of the following values: [`channels_last`, `channels_first`]")
+        self.axis = axis
 
     def __call__(self, inputs):
-        raise NotImplementedError
+        return pd.max(inputs, axis=self.axis)
 
 
 def reduce_max(input_tensor, axis=None):
diff --git a/tensorlayer/backend/ops/paddle_nn.py b/tensorlayer/backend/ops/paddle_nn.py
index 672752724..2a6ec2d2f 100644
--- a/tensorlayer/backend/ops/paddle_nn.py
+++ b/tensorlayer/backend/ops/paddle_nn.py
@@ -664,7 +664,12 @@ def __init__(self, ksize, strides, padding, data_format=None):
         self.strides = strides
 
     def __call__(self, inputs):
-        raise NotImplementedError
+        if self.data_format == 'NLC':
+            inputs = nhwc_to_nchw(inputs)
+        outputs = F.max_pool1d(inputs, self.ksize, self.strides, self.padding)
+        if self.data_format == 'NLC':
+            outputs = nchw_to_nhwc(outputs)
+        return outputs
 
 
 class MaxPool(object):
@@ -720,7 +725,12 @@ def __init__(self, ksize, strides, padding, data_format=None):
         self.strides = strides
 
     def __call__(self, inputs):
-        raise NotImplementedError
+        if self.data_format == 'NLC':
+            inputs = nhwc_to_nchw(inputs)
+        outputs = F.avg_pool1d(inputs, self.ksize, self.strides, self.padding)
+        if self.data_format == 'NLC':
+            outputs = nchw_to_nhwc(outputs)
+        return outputs
 
 
 class AvgPool(object):
@@ -728,10 +738,19 @@ class AvgPool(object):
 
     def __init__(self, ksize, strides, padding, data_format=None):
         self.data_format, self.padding = preprocess_2d_format(data_format, padding)
         self.filter_size = ksize
-        self.strides = strides
+        if self.data_format == 'NHWC':
+            self._stride = (strides[1], strides[2])
+        elif self.data_format == 'NCHW':
+            self._stride = (strides[2], strides[3])
 
     def __call__(self, inputs):
-        raise NotImplementedError
+        outputs = F.avg_pool2d(
+            inputs,
+            kernel_size=self.filter_size,
+            stride=self._stride,
+            padding=self.padding,
+            data_format=self.data_format)
+        return outputs
 
 
 def avg_pool(input, ksize, strides, padding):
@@ -760,6 +779,25 @@ def avg_pool(input, ksize, strides, padding):
     pass
 
 
+class MaxPool3d(object):
+
+    def __init__(self, ksize, strides, padding, data_format=None):
+        self.data_format, self.padding = preprocess_3d_format(data_format, padding)
+        self.ksize = ksize
+        if self.data_format == 'NCDHW':
+            self.strides = (strides[2], strides[3], strides[4])
+        if self.data_format == 'NDHWC':
+            self.strides = (strides[1], strides[2], strides[3])
+
+    def __call__(self, inputs):
+        outputs = F.max_pool3d(
+            inputs,
+            kernel_size=self.ksize,
+            stride=self.strides,
+            padding=self.padding,
+            data_format=self.data_format)
+        return outputs
+
+
 def max_pool3d(input, ksize, strides, padding, data_format=None,
name=None): """ Performs the max pooling on the input. @@ -790,6 +828,25 @@ def max_pool3d(input, ksize, strides, padding, data_format=None, name=None): pass +class AvgPool3d(object): + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + self.ksize = ksize + if self.data_format == 'NCDHW': + self.strides = (strides[2], strides[3], strides[4]) + if self.data_format == 'NDHWC': + self.strides = (strides[1], strides[2], strides[3]) + + def __call__(self, inputs): + outputs = F.avg_pool3d( + inputs, + kernel_size=self.ksize, + stride=self.strides, + padding=self.padding, + data_format=self.data_format) + return outputs + + def avg_pool3d(input, ksize, strides, padding, data_format=None, name=None): """ Performs the average pooling on the input. diff --git a/tensorlayer/backend/ops/tensorflow_nn.py b/tensorlayer/backend/ops/tensorflow_nn.py index 62970d7d8..1e8ac1142 100644 --- a/tensorlayer/backend/ops/tensorflow_nn.py +++ b/tensorlayer/backend/ops/tensorflow_nn.py @@ -795,6 +795,23 @@ def avg_pool(input, ksize, strides, padding): return outputs +class MaxPool3d(object): + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + self.ksize = ksize + self.strides = strides + + def __call__(self, inputs): + outputs = tf.nn.max_pool3d( + input=inputs, + ksize=self.ksize, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + ) + return outputs + + def max_pool3d(input, ksize, strides, padding, data_format=None): """ Performs the max pooling on the input. @@ -834,6 +851,23 @@ def max_pool3d(input, ksize, strides, padding, data_format=None): return outputs +class AvgPool3d(object): + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + self.ksize = ksize + self.strides = strides + + def __call__(self, inputs): + outputs = tf.nn.avg_pool3d( + input=inputs, + ksize=self.ksize, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + ) + return outputs + + def avg_pool3d(input, ksize, strides, padding, data_format=None): """ Performs the average pooling on the input. 
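With the TensorFlow backend these wrappers are thin shims over `tf.nn.max_pool3d` / `tf.nn.avg_pool3d`. A usage sketch under the assumption that the classes are re-exported as `tl.ops.MaxPool3d` / `tl.ops.AvgPool3d`, which is how pooling.py consumes them in the next diff; shapes are illustrative:

    import os
    os.environ['TL_BACKEND'] = 'tensorflow'  # select the backend before importing tensorlayer
    import numpy as np
    import tensorflow as tf
    import tensorlayer as tl

    x = tf.convert_to_tensor(np.random.rand(1, 8, 8, 8, 3).astype('float32'))  # NDHWC
    pool = tl.ops.MaxPool3d(ksize=(1, 2, 2, 2, 1), strides=(1, 2, 2, 2, 1), padding='SAME', data_format='NDHWC')
    print(pool(x).shape)  # (1, 4, 4, 4, 3)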
diff --git a/tensorlayer/layers/pooling.py b/tensorlayer/layers/pooling.py index c51969d73..988471f24 100644 --- a/tensorlayer/layers/pooling.py +++ b/tensorlayer/layers/pooling.py @@ -467,15 +467,13 @@ def build(self, inputs_shape=None): self._strides = [1, 1, self.strides[0], self.strides[1], self.strides[2]] else: raise Exception("unsupported data format") + self.max_pool3d = tl.ops.MaxPool3d(ksize=self.filter_size, + strides=self._strides, + padding=self.padding, + data_format=self.data_format) def forward(self, inputs): - outputs = tl.ops.max_pool3d( - input=inputs, - ksize=self.filter_size, - strides=self._strides, - padding=self.padding, - data_format=self.data_format, - ) + outputs = self.max_pool3d(inputs) return outputs @@ -547,15 +545,14 @@ def build(self, inputs_shape=None): self.data_format = 'NCDHW' else: raise Exception("unsupported data format") + self.avg_pool3d = tl.ops.AvgPool3d(ksize=self.filter_size, + strides=self._strides, + padding=self.padding, + data_format=self.data_format + ) def forward(self, inputs): - outputs = tl.ops.avg_pool3d( - input=inputs, - ksize=self.filter_size, - strides=self._strides, - padding=self.padding, - data_format=self.data_format, - ) + outputs = self.avg_pool3d(inputs) return outputs @@ -878,17 +875,17 @@ def __repr__(self): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape=None): - pass - - def forward(self, inputs): if self.data_format == 'channels_last': - outputs = tl.reduce_mean(input_tensor=inputs, axis=[1, 2, 3]) + self.reduce_mean = tl.ReduceMean(axis=[1, 2, 3]) elif self.data_format == 'channels_first': - outputs = tl.reduce_mean(input_tensor=inputs, axis=[2, 3, 4]) + self.reduce_mean = tl.ReduceMean(axis=[2, 3, 4]) else: raise ValueError( "`data_format` should have one of the following values: [`channels_last`, `channels_first`]" ) + + def forward(self, inputs): + outputs = self.reduce_mean(inputs) return outputs
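Patch 36 also flips the hard-coded `BACKEND` default in load_backend.py from 'tensorflow' to 'paddle', presumably a leftover from local testing. User code does not need to edit that file: the backend is selected per script through the `TL_BACKEND` environment variable, exactly as the cifar10 tutorial at the top of this series does:

    # Choose the compute backend before tensorlayer is imported;
    # 'tensorflow', 'mindspore' and 'paddle' are the values wired up here.
    import os
    os.environ['TL_BACKEND'] = 'tensorflow'
    import tensorlayer as tl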