From afacd02950aef54296593b43d377c4ecdb7b1382 Mon Sep 17 00:00:00 2001 From: adamjking3 Date: Fri, 5 Jul 2019 23:16:26 -0700 Subject: [PATCH] Fix CLI bugs and add back to support non-CLI workflow. --- README.md | 51 ++++++++++++++++++++------------ cli.py | 12 ++++---- lib/cli/RLTraderCLI.py | 11 ++++--- lib/cli/__init__.py | 2 ++ lib/cli/functions/__init__.py | 1 + lib/cli/functions/update_data.py | 42 ++++++++++++++++++++++++++ optimize.py | 29 ++++++++++++++++++ 7 files changed, 118 insertions(+), 30 deletions(-) create mode 100644 lib/cli/__init__.py create mode 100644 lib/cli/functions/__init__.py create mode 100644 lib/cli/functions/update_data.py create mode 100644 optimize.py diff --git a/README.md b/README.md index 8409a2e..13aed7d 100644 --- a/README.md +++ b/README.md @@ -24,9 +24,11 @@ https://towardsdatascience.com/using-reinforcement-learning-to-trade-bitcoin-for ### How to find out if you have nVIDIA GPU? Linux: + ```bash sudo lspci | grep -i --color 'vga\|3d\|2d' | grep -i nvidia ``` + If this returns anything, then you should have an nVIDIA card. ### Basic usage @@ -37,8 +39,8 @@ The first thing you will need to do to get started is install the requirements. 
cd "path-of-your-cloned-rl-trader-dir" pip install -r requirements.txt ``` -More information regarding how you can take advantage of your GPU while using docker: https://github.com/NVIDIA/nvidia-docker +More information regarding how you can take advantage of your GPU while using docker: https://github.com/NVIDIA/nvidia-docker If you have another type of GPU or you simply want to use your CPU, use: @@ -47,9 +49,10 @@ pip install -r requirements.no-gpu.txt ``` Update your current static files, that are used by default: + ```bash - python ./cli.py update-static - ``` + python ./cli.py update-static-data +``` Afterwards you can simply see the currently available options: @@ -60,22 +63,23 @@ python ./cli.py --help or simply run the project with default options: ```bash -python ./cli.py opt-train-test +python ./cli.py optimize-train-test ``` -If you have a standard set of configs you want to run the trader against, you can specify a config file to load configuration from. Rename config/config.ini.dist to config/config.ini and run +If you have a standard set of configs you want to run the trader against, you can specify a config file to load configuration from. Rename config/config.ini.dist to config/config.ini and run ```bash -python ./cli.py --from-config config/config.ini opt-train-test +python ./cli.py --from-config config/config.ini optimize-train-test ``` ```bash -python ./cli.py opt-train-test +python ./cli.py optimize-train-test ``` ### Testing with vagrant Start the vagrant box using: + ```bash vagrant up ``` @@ -83,58 +87,65 @@ vagrant up Code will be located at /vagrant. Play and/or test with whatever package you wish. 
Note: With vagrant you cannot take full advantage of your GPU, so is mainly for testing purposes - ### Testing with docker If you want to run everything within a docker container, then just use: + ```bash -./run-with-docker (cpu|gpu) (yes|no) opt-train-test +./run-with-docker (cpu|gpu) (yes|no) optimize-train-test ``` + - cpu - start the container using CPU requirements - gpu - start the container using GPU requirements - yes | no - start or not a local postgres container -Note: in case using yes as second argument, use + Note: in case using yes as second argument, use ```bash -python ./ cli.py --params-db-path "postgres://rl_trader:rl_trader@localhost" opt-train-test +python ./cli.py --params-db-path "postgres://rl_trader:rl_trader@localhost" optimize-train-test ``` The database and it's data are pesisted under `data/postgres` locally. If you want to spin a docker test environment: + ```bash ./run-with-docker (cpu|gpu) (yes|no) ``` If you want to run existing tests, then just use: + ```bash ./run-tests-with-docker ``` # Fire up a local docker dev environment + ```bash ./dev-with-docker ``` - # Optimizing, Training, and Testing While you could just let the agent train and run with the default PPO2 hyper-parameters, your agent would likely not be very profitable. The `stable-baselines` library provides a great set of default parameters that work for most problem domains, but we need to better. -To do this, you will need to run `cli.py`. +To do this, you will need to run `optimize.py`. ```bash -python ./cli.py opt-train-test +python ./optimize.py ``` This can take a while (hours to days depending on your hardware setup), but over time it will print to the console as trials are completed. Once a trial is completed, it will be stored in `./data/params.db`, an SQLite database, from which we can pull hyper-parameters to train our agent. 
From there, agents will be trained using the best set of hyper-parameters, and later tested on completely new data to verify the generalization of the algorithm. +Feel free to ask any questions in the Discord! + # Common troubleshooting ##### The specified module could not be found. + Normally this is caused by missing mpi module. You should install it according to your platorm. + - Windows: https://docs.microsoft.com/en-us/message-passing-interface/microsoft-mpi - Linux/MacOS: https://www.mpich.org/downloads/ @@ -150,14 +161,16 @@ If you would like to contribute, here is the roadmap for the future of this proj - Allow model/agent to be passed in at run time **[@notadamking]** - Allow live data to be saved in a format that can be later trained on **[@notadamking]** - Enable paper-trading by default **[@notadamking]** -- ~Enable complete multi-processing throughout the environment~ **[@notadamking]** arunavo4 +- ~Enable complete multi-processing throughout the environment~ **[@notadamking]** arunavo4 - ~Optionally replace SQLite db with Postgres to enable multi-processed Optuna training~ - This is enabled through Docker, though support for Postgres still needs to be improved - ~Replace `DummyVecEnv` with `SubProcVecEnv` everywhere throughout the code~ **[@archenroot, @arunavo4, @notadamking]** - - Allow features to be added/removed at runtime - - Create simple API for turning off default features (e.g. prediction, indicators, etc.) - - Create simple API for adding new features to observation space +- Allow features to be added/removed at runtime + - Create simple API for turning off default features (e.g. prediction, indicators, etc.) + - Create simple API for adding new features to observation space + ## Stage 1: + - Add more optional features to the feature space - Other exchange pair data (e.g. LTC/USD, ETH/USD, EOS/BTC, etc.) 
- Twitter sentiment analysis @@ -189,4 +202,4 @@ If you would like to contribute, here is the roadmap for the future of this proj Contributions are encouraged and I will always do my best to get them implemented into the library ASAP. This project is meant to grow as the community around it grows. Let me know if there is anything that you would like to see in the future or if there is anything you feel is missing. -**Working on your first Pull Request?** You can learn how from this *free* series [How to Contribute to an Open Source Project on GitHub](https://egghead.io/series/how-to-contribute-to-an-open-source-project-on-github) +**Working on your first Pull Request?** You can learn how from this _free_ series [How to Contribute to an Open Source Project on GitHub](https://egghead.io/series/how-to-contribute-to-an-open-source-project-on-github) diff --git a/cli.py b/cli.py index 8e0b865..aa327d5 100644 --- a/cli.py +++ b/cli.py @@ -1,16 +1,18 @@ import numpy as np + from deco import concurrent + from lib.RLTrader import RLTrader from lib.cli.RLTraderCLI import RLTraderCLI from lib.util.logger import init_logger -from update_data import download_async +from lib.cli.functions import download_data_async np.warnings.filterwarnings('ignore') trader_cli = RLTraderCLI() args = trader_cli.get_args() -@concurrent(processes=args.proc_number) +@concurrent(processes=args.parallel_jobs) def run_concurrent_optimize(trader: RLTrader, args): trader.optimize(args.trials, args.trials, args.parallel_jobs) @@ -25,12 +27,12 @@ def run_concurrent_optimize(trader: RLTrader, args): trader.train(n_epochs=args.epochs) elif args.command == 'test': trader.test(model_epoch=args.model_epoch, should_render=args.no_render) - elif args.command == 'opt-train-test': + elif args.command == 'optimize-train-test': run_concurrent_optimize(trader, args) trader.train( n_epochs=args.train_epochs, test_trained_model=args.no_test, render_trained_model=args.no_render ) - elif args.command == 'update-static': 
- download_async() \ No newline at end of file + elif args.command == 'update-static-data': + download_data_async() diff --git a/lib/cli/RLTraderCLI.py b/lib/cli/RLTraderCLI.py index c720f0f..c2a4e4b 100644 --- a/lib/cli/RLTraderCLI.py +++ b/lib/cli/RLTraderCLI.py @@ -8,7 +8,7 @@ class RLTraderCLI: def __init__(self): config_parser = argparse.ArgumentParser(add_help=False) config_parser.add_argument("-f", "--from-config", help="Specify config file", metavar="FILE") - args, remaining_argv = config_parser.parse_known_args() + args, _ = config_parser.parse_known_args() defaults = {} if args.from_config: @@ -39,13 +39,12 @@ def __init__(self): help='Tensorboard path', dest='tensorboard_path' ) - self.parser.add_argument('--proc-number', type=int, default=multiprocessing.cpu_count(), - help='How many processes to spawn') + self.parser.add_argument('--parallel-jobs', type=int, default=multiprocessing.cpu_count(), + help='How many processes in parallel') subparsers = self.parser.add_subparsers(help='Command', dest="command") - opt_train_test_parser = subparsers.add_parser('opt-train-test', description='Optimize train and test') - opt_train_test_parser.add_argument('--parallel-jobs', type=int, default=1, help='How many jobs in parallel') + opt_train_test_parser = subparsers.add_parser('optimize-train-test', description='Optimize train and test') opt_train_test_parser.add_argument('--trials', type=int, default=20, help='Number of trials') opt_train_test_parser.add_argument('--train-epochs', type=int, default=10, help='Train for how many epochs') opt_train_test_parser.add_argument('--no-render', action='store_false', help='Should render the model') @@ -63,7 +62,7 @@ def __init__(self): test_parser.add_argument('--model-epoch', type=int, default=1, help='Model epoch index') test_parser.add_argument('--no-render', action='store_false', help='Do not render test') - subparsers.add_parser('update-static', description='Update static data') + 
subparsers.add_parser('update-static-data', description='Update static data') self.parser.set_defaults(**defaults) diff --git a/lib/cli/__init__.py b/lib/cli/__init__.py new file mode 100644 index 0000000..9ffaf98 --- /dev/null +++ b/lib/cli/__init__.py @@ -0,0 +1,2 @@ +from lib.cli.functions import * +from lib.cli.RLTraderCLI import RLTraderCLI diff --git a/lib/cli/functions/__init__.py b/lib/cli/functions/__init__.py new file mode 100644 index 0000000..a059498 --- /dev/null +++ b/lib/cli/functions/__init__.py @@ -0,0 +1 @@ +from lib.cli.functions.update_data import download_data_async diff --git a/lib/cli/functions/update_data.py b/lib/cli/functions/update_data.py new file mode 100644 index 0000000..58328fd --- /dev/null +++ b/lib/cli/functions/update_data.py @@ -0,0 +1,42 @@ +import asyncio +import ssl +import pandas as pd +import os + +final_date_format = '%Y-%m-%d %H:%M' +ssl._create_default_https_context = ssl._create_unverified_context + +hourly_url = "https://www.cryptodatadownload.com/cdd/Coinbase_BTCUSD_1h.csv" +daily_url = "https://www.cryptodatadownload.com/cdd/Coinbase_BTCUSD_d.csv" + + +async def save_url_to_csv(url: str, date_format: str, file_name: str): + csv = pd.read_csv(url, header=1) + csv = csv.dropna(thresh=2) + csv.columns = ['Date', 'Symbol', 'Open', 'High', 'Low', 'Close', 'VolumeFrom', 'VolumeTo'] + csv['Date'] = pd.to_datetime(csv['Date'], format=date_format) + csv['Date'] = csv['Date'].dt.strftime(final_date_format) + + final_path = os.path.join('data', 'input', file_name) + csv.to_csv(final_path, index=False) + + return csv + + +async def save_as_csv(hourly_url: str, daily_url: str): + tasks = [save_url_to_csv(hourly_url, '%Y-%m-%d %I-%p', 'coinbase-1h-btc-usd.csv'), + save_url_to_csv(daily_url, '%Y-%m-%d', 'coinbase-1d-btc-usd.csv')] + # also FIRST_EXCEPTION and ALL_COMPLETED (default) + done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED) + print('>> done: ', done) + print('>> pending: ', pending) # will be 
empty if using default return_when setting + + +def download_data_async(): + loop = asyncio.get_event_loop() + loop.run_until_complete(save_as_csv(hourly_url, daily_url)) + loop.close() + + +if __name__ == '__main__': + download_data_async() diff --git a/optimize.py b/optimize.py new file mode 100644 index 0000000..f003f84 --- /dev/null +++ b/optimize.py @@ -0,0 +1,29 @@ +import numpy as np + +import multiprocessing +from lib.RLTrader import RLTrader + +np.warnings.filterwarnings('ignore') + + +def optimize_code(params): + trader = RLTrader(**params) + trader.optimize() + + +if __name__ == '__main__': + n_process = max(1, multiprocessing.cpu_count() - 4) + params = {} + + processes = [] + for i in range(n_process): + processes.append(multiprocessing.Process(target=optimize_code, args=(params,))) + + for p in processes: + p.start() + + for p in processes: + p.join() + + trader = RLTrader(**params) + trader.train(test_trained_model=True, render_trained_model=True)