From 703ae5d4aab1d52b074cc05c691a439253445525 Mon Sep 17 00:00:00 2001 From: Jactus Date: Wed, 2 Dec 2020 18:00:26 +0800 Subject: [PATCH] Update tft and readme --- README.md | 6 +++--- examples/benchmarks/TFT/data_formatters/qlib_Alpha158.py | 2 +- examples/run_all_model.py | 3 ++- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index b1a5f70929..1999160a1c 100644 --- a/README.md +++ b/README.md @@ -27,10 +27,10 @@ For more details, please refer to our paper ["Qlib: An AI-oriented Quantitative - [Data Preparation](#data-preparation) - [Auto Quant Research Workflow](#auto-quant-research-workflow) - [Building Customized Quant Research Workflow by Code](#building-customized-quant-research-workflow-by-code) -- [**Quant Model Zoo**](#quant-model-zoo) +- [Quant Model Zoo](#quant-model-zoo) - [Run a single model](#run-a-single-model) - [Run multiple models](#run-multiple-models) -- [**Quant Dataset Zoo**](#quant-dataset-zoo) +- [Quant Dataset Zoo](#quant-dataset-zoo) - [More About Qlib](#more-about-qlib) - [Offline Mode and Online Mode](#offline-mode-and-online-mode) - [Performance of Qlib Data Server](#performance-of-qlib-data-server) @@ -218,7 +218,7 @@ All the models listed above are runnable with ``Qlib``. Users can find the confi ## Run multiple models `Qlib` also provides a script [`run_all_model.py`](examples/run_all_model.py) which can run multiple models for several iterations. (**Note**: the script only supports *Linux* now. Other OS will be supported in the future.) -The script will create a unique virtual environment for each model, and delete the environments after training. Thus, only experiment results such as `IC` and `backtest` results will be generated and stored. (**Note**: the script will erase your previous experiment records created by running itself.) +The script will create a unique virtual environment for each model, and delete the environments after training. 
Thus, only experiment results such as `IC` and `backtest` results will be generated and stored. Here is an example of running all the models for 10 iterations: ```python diff --git a/examples/benchmarks/TFT/data_formatters/qlib_Alpha158.py b/examples/benchmarks/TFT/data_formatters/qlib_Alpha158.py index 44a9284f7b..03c169b9bf 100644 --- a/examples/benchmarks/TFT/data_formatters/qlib_Alpha158.py +++ b/examples/benchmarks/TFT/data_formatters/qlib_Alpha158.py @@ -208,7 +208,7 @@ def get_default_model_params(self): model_params = { "dropout_rate": 0.4, - "hidden_layer_size": 16, + "hidden_layer_size": 160, "learning_rate": 0.0001, "minibatch_size": 128, "max_gradient_norm": 0.0135, diff --git a/examples/run_all_model.py b/examples/run_all_model.py index 78e01f6236..416f1b47f4 100644 --- a/examples/run_all_model.py +++ b/examples/run_all_model.py @@ -291,7 +291,8 @@ def run(times=1, models=None, exclude=False): pprint(errors) sys.stderr.write("\n") # move results folder - shutil.move(exp_path, exp_path + f"_{datetime.now().strftime("%Y-%m-%d_%H:%M:%S")}") + shutil.move(exp_path, exp_path + f"_{datetime.now().strftime('%Y-%m-%d_%H:%M:%S')}") + if __name__ == "__main__": fire.Fire(run) # run all the model