mirror of https://github.com/freqtrade/freqtrade.git
synced 2024-11-10 10:21:59 +00:00

commit 7a4bb040a5: merge develop into feat/freqai-rl-dev
@@ -11,12 +11,14 @@
    "mounts": [
        "source=freqtrade-bashhistory,target=/home/ftuser/commandhistory,type=volume"
    ],
    "workspaceMount": "source=${localWorkspaceFolder},target=/workspaces/freqtrade,type=bind,consistency=cached",
    // Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
    "remoteUser": "ftuser",

    "onCreateCommand": "pip install --user -e .",
    "postCreateCommand": "freqtrade create-userdir --userdir user_data/",

-    "workspaceFolder": "/freqtrade/",
+    "workspaceFolder": "/workspaces/freqtrade",

    "settings": {
        "terminal.integrated.shell.linux": "/bin/bash",
.github/workflows/ci.yml (vendored, 8 changed lines)
@@ -258,7 +258,7 @@ jobs:
          webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}

  mypy_version_check:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
    steps:
    - uses: actions/checkout@v3

@@ -283,7 +283,7 @@ jobs:
    - uses: pre-commit/action@v3.0.0

  docs_check:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
    steps:
    - uses: actions/checkout@v3

@@ -313,7 +313,7 @@ jobs:
  # Notify only once - when CI completes (and after deploy) in case it's successful
  notify-complete:
    needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check, pre-commit ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
    # Discord notification can't handle schedule events
    if: (github.event_name != 'schedule')
    permissions:

@@ -338,7 +338,7 @@ jobs:

  deploy:
    needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check, pre-commit ]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04

    if: (github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'release') && github.repository == 'freqtrade/freqtrade'
@@ -39,7 +39,7 @@ Please read the [exchange specific notes](docs/exchanges.md) to learn about even

- [X] [Binance](https://www.binance.com/)
- [X] [Gate.io](https://www.gate.io/ref/6266643)
- - [X] [OKX](https://okx.com/).
+ - [X] [OKX](https://okx.com/)

Please make sure to read the [exchange specific notes](docs/exchanges.md), as well as the [trading with leverage](docs/leverage.md) documentation before diving in.
Binary file not shown.
@@ -175,6 +175,10 @@ print(res)

## FTX

+!!! Warning
+    Due to the current situation, we can no longer recommend FTX.
+    Please make sure to investigate the current situation before depositing any funds to FTX.
+
!!! Tip "Stoploss on Exchange"
    FTX supports `stoploss_on_exchange` and can use both stop-loss-market and stop-loss-limit orders. It provides great advantages, so we recommend taking advantage of it.
    You can use either `"limit"` or `"market"` in the `order_types.stoploss` configuration setting to decide which type of stoploss shall be used.
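For readers who want to see where that setting lives, here is a minimal, hypothetical strategy excerpt (editor's illustration, not part of this commit); the `order_types` attribute is the standard freqtrade mechanism for this, but the concrete values below are assumptions chosen for the example.

```python
from freqtrade.strategy import IStrategy


class FtxExampleStrategy(IStrategy):
    # Illustrative assumption: pick "limit" or "market" as the stoploss order
    # type and let the exchange handle the stoploss order itself.
    order_types = {
        "entry": "limit",
        "exit": "limit",
        "stoploss": "limit",            # or "market" for stop-loss-market orders
        "stoploss_on_exchange": True,
    }
```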
@@ -61,7 +61,7 @@ The FreqAI strategy requires including the following lines of code in the standa
        """
        Function designed to automatically generate, name and merge features
        from user indicated timeframes in the configuration file. User controls the indicators
-        passed to the training/prediction by prepending indicators with `'%-' + coin `
+        passed to the training/prediction by prepending indicators with `'%-' + pair `
        (see convention below). I.e. user should not prepend any supporting metrics
        (e.g. bb_lowerband below) with % unless they explicitly want to pass that metric to the
        model.

@@ -69,20 +69,17 @@ The FreqAI strategy requires including the following lines of code in the standa
        :param df: strategy dataframe which will receive merges from informatives
        :param tf: timeframe of the dataframe which will modify the feature names
        :param informative: the dataframe associated with the informative pair
-        :param coin: the name of the coin which will modify the feature names.
        """

-        coin = pair.split('/')[0]
-
        if informative is None:
            informative = self.dp.get_pair_dataframe(pair, tf)

        # first loop is automatically duplicating indicators for time periods
        for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]:
            t = int(t)
-            informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
-            informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
-            informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t)
+            informative[f"%-{pair}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
+            informative[f"%-{pair}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
+            informative[f"%-{pair}adx-period_{t}"] = ta.ADX(informative, window=t)

        indicators = [col for col in informative if col.startswith("%")]
        # This loop duplicates and shifts all indicators to add a sense of recency to data

@@ -134,7 +131,7 @@ Notice also the location of the labels under `if set_generalized_indicators:` at
(as exemplified in `freqtrade/templates/FreqaiExampleStrategy.py`):

```python
-def populate_any_indicators(self, metadata, pair, df, tf, informative=None, coin="", set_generalized_indicators=False):
+def populate_any_indicators(self, pair, df, tf, informative=None, set_generalized_indicators=False):

    ...
@@ -2,7 +2,10 @@

## Defining the features

- Low level feature engineering is performed in the user strategy within a function called `populate_any_indicators()`. That function sets the `base features` such as `RSI`, `MFI`, `EMA`, `SMA`, time of day, volume, etc. The `base features` can be custom indicators or they can be imported from any technical-analysis library that you can find. One important syntax rule is that all `base features` string names are prepended with `%`, while labels/targets are prepended with `&`.
+ Low level feature engineering is performed in the user strategy within a function called `populate_any_indicators()`. That function sets the `base features` such as `RSI`, `MFI`, `EMA`, `SMA`, time of day, volume, etc. The `base features` can be custom indicators or they can be imported from any technical-analysis library that you can find. One important syntax rule is that all `base features` string names are prepended with `%-{pair}`, while labels/targets are prepended with `&`.
+
+!!! Note
+    Adding the full pair string, e.g. XYZ/USD, in the feature name enables improved performance for dataframe caching on the backend. If you decide *not* to add the full pair string in the feature string, FreqAI will operate in a reduced performance mode.

Meanwhile, high level feature engineering is handled within `"feature_parameters":{}` in the FreqAI config. Within this file, it is possible to decide large scale feature expansions on top of the `base_features` such as "including correlated pairs" or "including informative timeframes" or even "including recent candles."
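As a quick illustration of that naming rule (editor's illustration, not part of this commit), the hypothetical fragment below shows one `%-{pair}`-prefixed base feature that reaches the model, one unprefixed helper column that does not, and one `&`-prefixed label. It assumes `talib.abstract` and the reduced `populate_any_indicators()` signature introduced in this commit; the merge and shift boilerplate from the full template is deliberately omitted.

```python
import talib.abstract as ta

from freqtrade.strategy import IStrategy


class MinimalFreqaiStrategy(IStrategy):
    # Hypothetical fragment: only the feature/label naming convention matters here.

    def populate_any_indicators(self, pair, df, tf, informative=None,
                                set_generalized_indicators=False):
        if informative is None:
            informative = self.dp.get_pair_dataframe(pair, tf)

        # "%-" + pair  -> base feature that is handed to the model
        informative[f"%-{pair}rsi-period_14"] = ta.RSI(informative, timeperiod=14)

        # no "%" prefix -> supporting column only, never passed to the model
        informative["ema_200"] = ta.EMA(informative, timeperiod=200)

        if set_generalized_indicators:
            # "&-" prefix -> label/target the model learns to predict
            df["&-s_close"] = (
                df["close"].shift(-5).rolling(5).mean() / df["close"] - 1
            )

        # the full template would now merge `informative` into `df` and shift
        # columns; that boilerplate is omitted from this sketch
        return df
```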
@@ -15,7 +18,7 @@ It is advisable to start from the template `populate_any_indicators()` in the so
        """
        Function designed to automatically generate, name, and merge features
        from user-indicated timeframes in the configuration file. The user controls the indicators
-        passed to the training/prediction by prepending indicators with `'%-' + coin `
+        passed to the training/prediction by prepending indicators with `'%-' + pair `
        (see convention below). I.e., the user should not prepend any supporting metrics
        (e.g., bb_lowerband below) with % unless they explicitly want to pass that metric to the
        model.

@@ -23,37 +26,34 @@ It is advisable to start from the template `populate_any_indicators()` in the so
        :param df: strategy dataframe which will receive merges from informatives
        :param tf: timeframe of the dataframe which will modify the feature names
        :param informative: the dataframe associated with the informative pair
-        :param coin: the name of the coin which will modify the feature names.
        """

-        coin = pair.split('/')[0]
-
        if informative is None:
            informative = self.dp.get_pair_dataframe(pair, tf)

        # first loop is automatically duplicating indicators for time periods
        for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]:
            t = int(t)
-            informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
-            informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
-            informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t)
+            informative[f"%-{pair}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
+            informative[f"%-{pair}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
+            informative[f"%-{pair}adx-period_{t}"] = ta.ADX(informative, window=t)

            bollinger = qtpylib.bollinger_bands(
                qtpylib.typical_price(informative), window=t, stds=2.2
            )
-            informative[f"{coin}bb_lowerband-period_{t}"] = bollinger["lower"]
-            informative[f"{coin}bb_middleband-period_{t}"] = bollinger["mid"]
-            informative[f"{coin}bb_upperband-period_{t}"] = bollinger["upper"]
+            informative[f"{pair}bb_lowerband-period_{t}"] = bollinger["lower"]
+            informative[f"{pair}bb_middleband-period_{t}"] = bollinger["mid"]
+            informative[f"{pair}bb_upperband-period_{t}"] = bollinger["upper"]

-            informative[f"%-{coin}bb_width-period_{t}"] = (
-                informative[f"{coin}bb_upperband-period_{t}"]
-                - informative[f"{coin}bb_lowerband-period_{t}"]
-            ) / informative[f"{coin}bb_middleband-period_{t}"]
-            informative[f"%-{coin}close-bb_lower-period_{t}"] = (
-                informative["close"] / informative[f"{coin}bb_lowerband-period_{t}"]
+            informative[f"%-{pair}bb_width-period_{t}"] = (
+                informative[f"{pair}bb_upperband-period_{t}"]
+                - informative[f"{pair}bb_lowerband-period_{t}"]
+            ) / informative[f"{pair}bb_middleband-period_{t}"]
+            informative[f"%-{pair}close-bb_lower-period_{t}"] = (
+                informative["close"] / informative[f"{pair}bb_lowerband-period_{t}"]
            )

-            informative[f"%-{coin}relative_volume-period_{t}"] = (
+            informative[f"%-{pair}relative_volume-period_{t}"] = (
                informative["volume"] / informative["volume"].rolling(t).mean()
            )
@@ -18,6 +18,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
| `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)). <br> **Datatype:** Positive integer.
| `follow_mode` | Use a `follower` that will look for models associated with a specific `identifier` and load those for inferencing. A `follower` will **not** train new models. <br> **Datatype:** Boolean. <br> Default: `False`.
| `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)). <br> **Datatype:** Boolean. <br> Default: `False`.
+| `write_metrics_to_disk` | Collect train timings, inference timings and cpu usage in json file. <br> **Datatype:** Boolean. <br> Default: `False`
| | **Feature parameters**
| `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples are shown [here](freqai-feature-engineering.md). <br> **Datatype:** Dictionary.
| `include_timeframes` | A list of timeframes that all indicators in `populate_any_indicators` will be created for. The list is added as features to the base indicators dataset. <br> **Datatype:** List of timeframes (strings).

@@ -37,7 +38,6 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
| `noise_standard_deviation` | If set, FreqAI adds noise to the training features with the aim of preventing overfitting. FreqAI generates random deviates from a gaussian distribution with a standard deviation of `noise_standard_deviation` and adds them to all data points. `noise_standard_deviation` should be kept relative to the normalized space, i.e., between -1 and 1. In other words, since data in FreqAI is always normalized to be between -1 and 1, `noise_standard_deviation: 0.05` would result in 32% of the data being randomly increased/decreased by more than 2.5% (i.e., the percent of data falling within the first standard deviation). <br> **Datatype:** Integer. <br> Default: `0`.
| `outlier_protection_percentage` | Enable to prevent outlier detection methods from discarding too much data. If more than `outlier_protection_percentage` % of points are detected as outliers by the SVM or DBSCAN, FreqAI will log a warning message and ignore outlier detection, i.e., the original dataset will be kept intact. If the outlier protection is triggered, no predictions will be made based on the training dataset. <br> **Datatype:** Float. <br> Default: `30`.
| `reverse_train_test_order` | Split the feature dataset (see below) and use the latest data split for training and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, you should be careful to understand the unorthodox nature of this parameter before employing it. <br> **Datatype:** Boolean. <br> Default: `False` (no reversal).
-| `write_metrics_to_disk` | Collect train timings, inference timings and cpu usage in json file. <br> **Datatype:** Boolean. <br> Default: `False`
| | **Data split parameters**
| `data_split_parameters` | Include any additional parameters available from Scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website). <br> **Datatype:** Dictionary.
| `test_size` | The fraction of data that should be used for testing instead of training. <br> **Datatype:** Positive float < 1.
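To relate the parameter table above to an actual configuration, here is a minimal sketch (editor's illustration, not part of this commit) using only parameters that appear in the table; the values are illustrative assumptions and it is written as a Python dict for brevity even though the real config file is JSON.

```python
# Hypothetical, minimal "freqai" config section built from the documented parameters.
freqai_config = {
    "freqai": {
        "enabled": True,
        "identifier": "example",          # change whenever the feature set changes
        "train_period_days": 30,
        "backtest_period_days": 7,
        "write_metrics_to_disk": False,
        "continual_learning": False,
        "feature_parameters": {
            "include_timeframes": ["5m", "1h"],
            "indicator_periods_candles": [10, 20],
        },
        "data_split_parameters": {
            "test_size": 0.25,
        },
    },
}
```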
@@ -73,12 +73,24 @@ Backtesting mode requires [downloading the necessary data](#downloading-data-to-

To allow for tweaking your strategy (**not** the features!), FreqAI will automatically save the predictions during backtesting so that they can be reused for future backtests and live runs using the same `identifier` model. This provides a performance enhancement geared towards enabling **high-level hyperopting** of entry/exit criteria.

- An additional directory called `predictions`, which contains all the predictions stored in `hdf` format, will be created in the `unique-id` folder.
+ An additional directory called `backtesting_predictions`, which contains all the predictions stored in `hdf` format, will be created in the `unique-id` folder.

To change your **features**, you **must** set a new `identifier` in the config to signal to FreqAI to train new models.

To save the models generated during a particular backtest so that you can start a live deployment from one of them instead of training a new model, you must set `save_backtest_models` to `True` in the config.

### Backtest live models

FreqAI allows you to reuse ready models through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse models generated during a dry/live run for comparison or further study. For that, you must set `"purge_old_models"` to `True` in the config.

The `--timerange` parameter must not be provided, as it will be calculated automatically from the training end dates of the models.

Each model has an identifier derived from its training end date. If you have only 1 model trained, FreqAI will backtest from that training end date until the current date. If you have more than 1 model, each model will perform the backtesting from its training end date until the training end date of the next model, and so on. For the last model, the period of the previous model will be used for the execution.

!!! Note
    Currently, there is no checking for expired models, even if the `expired_hours` parameter is set.

### Downloading data to cover the full backtest period

For live/dry deployments, FreqAI will download the necessary data automatically. However, to use backtesting functionality, you need to download the necessary data using `download-data` (details [here](data-download.md#data-downloading)). You need to pay careful attention to understanding how much *additional* data needs to be downloaded to ensure that there is a sufficient amount of training data *before* the start of the backtesting time range. The amount of additional data can be roughly estimated by moving the start date of the time range backwards by `train_period_days` and the `startup_candle_count` (see the [parameter table](freqai-parameter-table.md) for detailed descriptions of these parameters) from the beginning of the desired backtesting time range.
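The last paragraph describes the extra-data estimate in words; the short sketch below (editor's illustration, not part of this commit) spells out the same arithmetic under assumed example values (30 `train_period_days`, a 40-candle `startup_candle_count` on a 1h timeframe), purely to show how far the download start date is shifted back.

```python
from datetime import datetime, timedelta

# Assumed example values -- substitute your own settings.
backtest_start = datetime(2022, 7, 1)   # start of the desired backtesting timerange
train_period_days = 30                  # freqai.train_period_days
startup_candle_count = 40               # strategy startup_candle_count
timeframe_hours = 1                     # base timeframe of 1h

# Move the start of the downloaded data back by the training window
# plus the startup candles required by the strategy indicators.
extra = timedelta(days=train_period_days,
                  hours=startup_candle_count * timeframe_hours)
download_start = backtest_start - extra
print(f"download data starting no later than: {download_start:%Y%m%d}")
```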
@@ -4,7 +4,7 @@

## Introduction

- FreqAI is a software designed to automate a variety of tasks associated with training a predictive machine learning model to generate market forecasts given a set of input features.
+ FreqAI is a software designed to automate a variety of tasks associated with training a predictive machine learning model to generate market forecasts given a set of input signals. In general, FreqAI aims to be a sandbox for easily deploying robust machine-learning libraries on real-time data ([details](#freqai-position-in-open-source-machine-learning-landscape)).

Features include:

@@ -72,6 +72,11 @@ pip install -r requirements-freqai.txt

If you are using docker, a dedicated tag with FreqAI dependencies is available as `:freqai`. As such - you can replace the image line in your docker-compose file with `image: freqtradeorg/freqtrade:develop_freqai`. This image contains the regular FreqAI dependencies. Similar to native installs, Catboost will not be available on ARM based devices.

### FreqAI position in open-source machine learning landscape

Forecasting chaotic time-series based systems, such as equity/cryptocurrency markets, requires a broad set of tools geared toward testing a wide range of hypotheses. Fortunately, a recent maturation of robust machine learning libraries (e.g. `scikit-learn`) has opened up a wide range of research possibilities. Scientists from a diverse range of fields can now easily prototype their studies on an abundance of established machine learning algorithms. Similarly, these user-friendly libraries enable "citizen scientists" to use their basic Python skills for data-exploration. However, leveraging these machine learning libraries on historical and live chaotic data sources can be logistically difficult and expensive. Additionally, robust data-collection, storage, and handling presents a disparate challenge. [`FreqAI`](#freqai) aims to provide a generalized and extensible open-sourced framework geared toward live deployments of adaptive modeling for market forecasting. The `FreqAI` framework is effectively a sandbox for the rich world of open-source machine learning libraries. Inside the `FreqAI` sandbox, users find they can combine a wide variety of third-party libraries to test creative hypotheses on a free live 24/7 chaotic data source - cryptocurrency exchange data.

## Common pitfalls

FreqAI cannot be combined with dynamic `VolumePairlists` (or any pairlist filter that adds and removes pairs dynamically).
@@ -286,6 +286,18 @@ Min price precision for SHITCOIN/BTC is 8 decimals. If its price is 0.00000011 -

Shuffles (randomizes) pairs in the pairlist. It can be used for preventing the bot from trading some of the pairs more frequently than others when you want all pairs to be treated with the same priority.

By default, ShuffleFilter will shuffle pairs once per candle.
To shuffle on every iteration, set `"shuffle_frequency"` to `"iteration"` instead of the default of `"candle"`.

``` json
{
    "method": "ShuffleFilter",
    "shuffle_frequency": "candle",
    "seed": 42
}
```

!!! Tip
    You may set the `seed` value for this Pairlist to obtain reproducible results, which can be useful for repeated backtesting sessions. If `seed` is not set, the pairs are shuffled in a non-repeatable, random order. ShuffleFilter will automatically detect runmodes and apply the `seed` only for backtesting modes - if a `seed` value is set.
@@ -32,7 +32,7 @@ Freqtrade is a free and open source crypto trading bot written in Python. It is
- Run: Test your strategy with simulated money (Dry-Run mode) or deploy it with real money (Live-Trade mode).
- Run using Edge (optional module): The concept is to find the best historical [trade expectancy](edge.md#expectancy) by markets based on variation of the stop-loss and then allow/reject markets to trade. The sizing of the trade is based on a risk of a percentage of your capital.
- Control/Monitor: Use Telegram or a WebUI (start/stop the bot, show profit/loss, daily summary, current open trades results, etc.).
- - Analyse: Further analysis can be performed on either Backtesting data or Freqtrade trading history (SQL database), including automated standard plots, and methods to load the data into [interactive environments](data-analysis.md).
+ - Analyze: Further analysis can be performed on either Backtesting data or Freqtrade trading history (SQL database), including automated standard plots, and methods to load the data into [interactive environments](data-analysis.md).

## Supported exchange marketplaces

@@ -51,7 +51,7 @@ Please read the [exchange specific notes](exchanges.md) to learn about eventual,

- [X] [Binance](https://www.binance.com/)
- [X] [Gate.io](https://www.gate.io/ref/6266643)
- - [X] [OKX](https://okx.com/).
+ - [X] [OKX](https://okx.com/)

Please make sure to read the [exchange specific notes](exchanges.md), as well as the [trading with leverage](leverage.md) documentation before diving in.
@@ -1,6 +1,6 @@
markdown==3.3.7
-mkdocs==1.4.1
-mkdocs-material==8.5.7
+mkdocs==1.4.2
+mkdocs-material==8.5.8
mdx_truly_sane_lists==1.3
pymdown-extensions==9.7
jinja2==3.1.2
@@ -3,15 +3,16 @@
We **strongly** recommend that Windows users use [Docker](docker_quickstart.md) as this works much more smoothly (and is also more secure).

If that is not possible, try using the Windows Linux subsystem (WSL) - for which the Ubuntu instructions should work.
-Otherwise, try the instructions below.
+Otherwise, please follow the instructions below.

## Install freqtrade manually

-!!! Note
-    Make sure to use 64bit Windows and 64bit Python to avoid problems with backtesting or hyperopt due to the memory constraints 32bit applications have under Windows.
+!!! Note "64bit Python version"
+    Please make sure to use 64bit Windows and 64bit Python to avoid problems with backtesting or hyperopt due to the memory constraints 32bit applications have under Windows.
+    32bit python versions are no longer supported under Windows.

!!! Hint
-    Using the [Anaconda Distribution](https://www.anaconda.com/distribution/) under Windows can greatly help with installation problems. Check out the [Anaconda installation section](installation.md#Anaconda) in this document for more information.
+    Using the [Anaconda Distribution](https://www.anaconda.com/distribution/) under Windows can greatly help with installation problems. Check out the [Anaconda installation section](installation.md#installation-with-conda) in the documentation for more information.

### 1. Clone the git repository
@@ -25,7 +25,8 @@ ARGS_COMMON_OPTIMIZE = ["timeframe", "timerange", "dataformat_ohlcv",
ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + ["position_stacking", "use_max_market_positions",
                                        "enable_protections", "dry_run_wallet", "timeframe_detail",
                                        "strategy_list", "export", "exportfilename",
-                                        "backtest_breakdown", "backtest_cache"]
+                                        "backtest_breakdown", "backtest_cache",
+                                        "freqai_backtest_live_models"]

ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + ["hyperopt", "hyperopt_path",
                                        "position_stacking", "use_max_market_positions",
@@ -668,4 +668,9 @@ AVAILABLE_CLI_OPTIONS = {
        help='Specify additional lookup path for freqaimodels.',
        metavar='PATH',
    ),
    "freqai_backtest_live_models": Arg(
        '--freqai-backtest-live-models',
        help='Run backtest with ready models.',
        action='store_true'
    ),
}
@@ -86,6 +86,7 @@ def validate_config_consistency(conf: Dict[str, Any], preliminary: bool = False)
    _validate_unlimited_amount(conf)
    _validate_ask_orderbook(conf)
    _validate_freqai_hyperopt(conf)
    _validate_freqai_backtest(conf)
    _validate_freqai_include_timeframes(conf)
    _validate_consumers(conf)
    validate_migrated_strategy_settings(conf)

@@ -355,6 +356,26 @@ def _validate_freqai_include_timeframes(conf: Dict[str, Any]) -> None:
            f"`include_timeframes`.Offending include-timeframes: {', '.join(offending_lines)}")


def _validate_freqai_backtest(conf: Dict[str, Any]) -> None:
    if conf.get('runmode', RunMode.OTHER) == RunMode.BACKTEST:
        freqai_enabled = conf.get('freqai', {}).get('enabled', False)
        timerange = conf.get('timerange')
        freqai_backtest_live_models = conf.get('freqai_backtest_live_models', False)
        if freqai_backtest_live_models and freqai_enabled and timerange:
            raise OperationalException(
                'Using timerange parameter is not supported with '
                '--freqai-backtest-live-models parameter.')

        if freqai_backtest_live_models and not freqai_enabled:
            raise OperationalException(
                'Using --freqai-backtest-live-models parameter is only '
                'supported with a FreqAI strategy.')

        if freqai_enabled and not freqai_backtest_live_models and not timerange:
            raise OperationalException(
                'Please pass --timerange if you intend to use FreqAI for backtesting.')


def _validate_consumers(conf: Dict[str, Any]) -> None:
    emc_conf = conf.get('external_message_consumer', {})
    if emc_conf.get('enabled', False):
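From a caller's point of view, the new validation rejects exactly three configuration combinations. The sketch below (editor's illustration, not part of this commit) exercises them; it assumes the function is importable from `freqtrade.configuration.config_validation`, which is not stated in this extract.

```python
# Illustrative only: exercises the three error branches of _validate_freqai_backtest.
from freqtrade.configuration.config_validation import _validate_freqai_backtest
from freqtrade.enums import RunMode
from freqtrade.exceptions import OperationalException

base = {"runmode": RunMode.BACKTEST, "freqai": {"enabled": True}}

# 1) live-models backtest combined with --timerange -> rejected
try:
    _validate_freqai_backtest({**base, "timerange": "20220101-20220201",
                               "freqai_backtest_live_models": True})
except OperationalException as err:
    print(err)

# 2) live-models backtest without a FreqAI strategy -> rejected
try:
    _validate_freqai_backtest({"runmode": RunMode.BACKTEST,
                               "freqai_backtest_live_models": True})
except OperationalException as err:
    print(err)

# 3) FreqAI backtest without --timerange and without live models -> rejected
try:
    _validate_freqai_backtest(base)
except OperationalException as err:
    print(err)
```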
@@ -279,6 +279,9 @@ class Configuration:
        self._args_to_config(config, argname='disableparamexport',
                             logstring='Parameter --disableparamexport detected: {} ...')

        self._args_to_config(config, argname='freqai_backtest_live_models',
                             logstring='Parameter --freqai-backtest-live-models detected ...')

        # Edge section:
        if 'stoploss_range' in self.args and self.args["stoploss_range"]:
            txt_range = eval(self.args["stoploss_range"])
@@ -542,7 +542,7 @@ CONF_SCHEMA = {
                "keras": {"type": "boolean", "default": False},
                "write_metrics_to_disk": {"type": "boolean", "default": False},
                "purge_old_models": {"type": "boolean", "default": True},
-                "conv_width": {"type": "integer", "default": 2},
+                "conv_width": {"type": "integer", "default": 1},
                "train_period_days": {"type": "integer", "default": 0},
                "backtest_period_days": {"type": "number", "default": 7},
                "identifier": {"type": "string", "default": "example"},
@@ -26,7 +26,7 @@ BT_DATA_COLUMNS = ['pair', 'stake_amount', 'amount', 'open_date', 'close_date',
                   'profit_ratio', 'profit_abs', 'exit_reason',
                   'initial_stop_loss_abs', 'initial_stop_loss_ratio', 'stop_loss_abs',
                   'stop_loss_ratio', 'min_rate', 'max_rate', 'is_open', 'enter_tag',
-                   'is_short', 'open_timestamp', 'close_timestamp', 'orders'
+                   'leverage', 'is_short', 'open_timestamp', 'close_timestamp', 'orders'
                   ]
@@ -280,6 +280,8 @@ def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = Non
            # Compatibility support for pre short Columns
            if 'is_short' not in df.columns:
                df['is_short'] = 0
            if 'leverage' not in df.columns:
                df['leverage'] = 1.0
            if 'enter_tag' not in df.columns:
                df['enter_tag'] = df['buy_tag']
                df = df.drop(['buy_tag'], axis=1)
@@ -102,6 +102,11 @@ class IDataHandler(ABC):
        :return: (min, max)
        """
        data = self._ohlcv_load(pair, timeframe, None, candle_type)
        if data.empty:
            return (
                datetime.fromtimestamp(0, tz=timezone.utc),
                datetime.fromtimestamp(0, tz=timezone.utc)
            )
        return data.iloc[0]['date'].to_pydatetime(), data.iloc[-1]['date'].to_pydatetime()

    @abstractmethod
@@ -21,7 +21,11 @@ class Bybit(Exchange):

    _ft_has: Dict = {
        "ohlcv_candle_limit": 200,
-        "ccxt_futures_name": "linear"
+        "ccxt_futures_name": "linear",
+        "ohlcv_has_history": False,
    }
    _ft_has_futures: Dict = {
        "ohlcv_has_history": True,
    }

    _supported_trading_mode_margin_pairs: List[Tuple[TradingMode, MarginMode]] = [
@@ -1689,6 +1689,17 @@ class Exchange:
    @retrier
    def get_fee(self, symbol: str, type: str = '', side: str = '', amount: float = 1,
                price: float = 1, taker_or_maker: MakerTaker = 'maker') -> float:
        """
        Retrieve fee from exchange
        :param symbol: Pair
        :param type: Type of order (market, limit, ...)
        :param side: Side of order (buy, sell)
        :param amount: Amount of order
        :param price: Price of order
        :param taker_or_maker: 'maker' or 'taker' (ignored if "type" is provided)
        """
        if type and type == 'market':
            taker_or_maker = 'taker'
        try:
            if self._config['dry_run'] and self._config.get('fee', None) is not None:
                return self._config['fee']
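A small usage sketch (editor's illustration, not part of this commit) of the behaviour documented in the new docstring: passing `type='market'` makes `get_fee()` return the taker fee regardless of the `taker_or_maker` argument. The helper function and pair are assumptions for illustration; `exchange` stands for any already-initialised `Exchange` instance.

```python
from freqtrade.exchange import Exchange


def show_fees(exchange: Exchange) -> None:
    # limit order: the taker_or_maker argument is honoured
    maker_fee = exchange.get_fee("BTC/USDT", type="limit", taker_or_maker="maker")
    # market order: get_fee overrides taker_or_maker and returns the taker fee
    taker_fee = exchange.get_fee("BTC/USDT", type="market", taker_or_maker="maker")
    print(f"maker: {maker_fee}, taker: {taker_fee}")
```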
freqtrade/freqai/base_models/FreqaiMultiOutputClassifier.py (new file, 93 lines)

@@ -0,0 +1,93 @@
import numpy as np
from joblib import Parallel
from sklearn.base import is_classifier
from sklearn.multioutput import MultiOutputClassifier, _fit_estimator
from sklearn.utils.fixes import delayed
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import has_fit_parameter

from freqtrade.exceptions import OperationalException


class FreqaiMultiOutputClassifier(MultiOutputClassifier):

    def fit(self, X, y, sample_weight=None, fit_params=None):
        """Fit the model to data, separately for each output variable.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.
        y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
            Multi-output targets. An indicator matrix turns on multilabel
            estimation.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If `None`, then samples are equally weighted.
            Only supported if the underlying classifier supports sample
            weights.
        fit_params : A list of dicts for the fit_params
            Parameters passed to the ``estimator.fit`` method of each step.
            Each dict may contain same or different values (e.g. different
            eval_sets or init_models)
            .. versionadded:: 0.23
        Returns
        -------
        self : object
            Returns a fitted instance.
        """

        if not hasattr(self.estimator, "fit"):
            raise ValueError("The base estimator should implement a fit method")

        y = self._validate_data(X="no_validation", y=y, multi_output=True)

        if is_classifier(self):
            check_classification_targets(y)

        if y.ndim == 1:
            raise ValueError(
                "y must have at least two dimensions for "
                "multi-output regression but has only one."
            )

        if sample_weight is not None and not has_fit_parameter(
            self.estimator, "sample_weight"
        ):
            raise ValueError("Underlying estimator does not support sample weights.")

        if not fit_params:
            fit_params = [None] * y.shape[1]

        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_estimator)(
                self.estimator, X, y[:, i], sample_weight, **fit_params[i]
            )
            for i in range(y.shape[1])
        )

        self.classes_ = []
        for estimator in self.estimators_:
            self.classes_.extend(estimator.classes_)
        if len(set(self.classes_)) != len(self.classes_):
            raise OperationalException(f"Class labels must be unique across targets: "
                                       f"{self.classes_}")

        if hasattr(self.estimators_[0], "n_features_in_"):
            self.n_features_in_ = self.estimators_[0].n_features_in_
        if hasattr(self.estimators_[0], "feature_names_in_"):
            self.feature_names_in_ = self.estimators_[0].feature_names_in_

        return self

    def predict_proba(self, X):
        """
        Get predict_proba and stack arrays horizontally
        """
        results = np.hstack(super().predict_proba(X))
        return np.squeeze(results)

    def predict(self, X):
        """
        Get predict and squeeze into 2D array
        """
        results = super().predict(X)
        return np.squeeze(results)
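To show how the new class is intended to be driven, here is a standalone sketch (editor's illustration, not part of this commit) that fits it on synthetic data with two targets and per-estimator `fit_params`, mirroring the `eval_sets`/`init_models` style mentioned in the docstring. The LogisticRegression estimator and the dummy data are assumptions chosen only for illustration.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

from freqtrade.freqai.base_models.FreqaiMultiOutputClassifier import (
    FreqaiMultiOutputClassifier)

# Synthetic data: 100 samples, 4 features, two independent categorical targets.
# Class labels are kept distinct across targets because fit() enforces uniqueness.
rng = np.random.default_rng(42)
X = rng.normal(size=(100, 4))
y = np.column_stack([
    rng.choice(["up", "down"], size=100),
    rng.choice(["high_vol", "low_vol"], size=100),
])

model = FreqaiMultiOutputClassifier(estimator=LogisticRegression())

# One fit_params dict per target column; empty here, but per-estimator
# eval_sets or init_models could be passed as described in the docstring.
model.fit(X, y, fit_params=[{}, {}])

proba = model.predict_proba(X)   # per-target probabilities, stacked horizontally
preds = model.predict(X)         # 2D array of per-target predictions
print(preds[:3], proba.shape)
```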
@@ -87,6 +87,7 @@ class FreqaiDataDrawer:
        self.create_follower_dict()
        self.load_drawer_from_disk()
        self.load_historic_predictions_from_disk()
        self.metric_tracker: Dict[str, Dict[str, Dict[str, list]]] = {}
        self.load_metric_tracker_from_disk()
        self.training_queue: Dict[str, int] = {}
        self.history_lock = threading.Lock()

@@ -97,7 +98,6 @@ class FreqaiDataDrawer:
        self.empty_pair_dict: pair_info = {
            "model_filename": "", "trained_timestamp": 0,
            "data_path": "", "extras": {}}
        self.metric_tracker: Dict[str, Dict[str, Dict[str, list]]] = {}
        self.limit_ram_use = self.freqai_info.get('limit_ram_usage', False)
        if 'rl_config' in self.freqai_info:
            self.model_type = 'stable_baselines'

@@ -160,6 +160,7 @@ class FreqaiDataDrawer:
        if exists:
            with open(self.metric_tracker_path, "r") as fp:
                self.metric_tracker = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
            logger.info("Loading existing metric tracker from disk.")
        else:
            logger.info("Could not find existing metric tracker, starting from scratch")

@@ -549,14 +550,6 @@ class FreqaiDataDrawer:
        if dk.live:
            dk.model_filename = self.pair_dict[coin]["model_filename"]
            dk.data_path = Path(self.pair_dict[coin]["data_path"])
            if self.freqai_info.get("follow_mode", False):
                # follower can be on a different system which is rsynced from the leader:
                dk.data_path = Path(
                    self.config["user_data_dir"]
                    / "models"
                    / dk.data_path.parts[-2]
                    / dk.data_path.parts[-1]
                )

        if coin in self.meta_data_dictionary:
            dk.data = self.meta_data_dictionary[coin]["meta_data"]

@@ -652,6 +645,8 @@ class FreqaiDataDrawer:
                axis=0,
            )

        self.current_candle = history_data[dk.pair][self.config['timeframe']].iloc[-1]['date']

    def load_all_pair_histories(self, timerange: TimeRange, dk: FreqaiDataKitchen) -> None:
        """
        Load pair histories for all whitelist and corr_pairlist pairs.
@@ -1,7 +1,7 @@
import copy
import logging
import shutil
-from datetime import datetime, timezone
+from datetime import datetime, timedelta, timezone
from math import cos, sin
from pathlib import Path
from typing import Any, Dict, List, Tuple
@@ -81,19 +81,25 @@ class FreqaiDataKitchen:
        self.svm_model: linear_model.SGDOneClassSVM = None
        self.keras: bool = self.freqai_config.get("keras", False)
        self.set_all_pairs()
-        if not self.live:
-            if not self.config["timerange"]:
-                raise OperationalException(
-                    'Please pass --timerange if you intend to use FreqAI for backtesting.')
-            self.full_timerange = self.create_fulltimerange(
-                self.config["timerange"], self.freqai_config.get("train_period_days", 0)
-            )
+        self.backtest_live_models = config.get("freqai_backtest_live_models", False)

-            (self.training_timeranges, self.backtesting_timeranges) = self.split_timerange(
-                self.full_timerange,
-                config["freqai"]["train_period_days"],
-                config["freqai"]["backtest_period_days"],
-            )
+        if not self.live:
+            self.full_path = self.get_full_models_path(self.config)
+
+            if self.backtest_live_models:
+                if self.pair:
+                    self.set_timerange_from_ready_models()
+                    (self.training_timeranges,
+                     self.backtesting_timeranges) = self.split_timerange_live_models()
+            else:
+                self.full_timerange = self.create_fulltimerange(
+                    self.config["timerange"], self.freqai_config.get("train_period_days", 0)
+                )
+                (self.training_timeranges, self.backtesting_timeranges) = self.split_timerange(
+                    self.full_timerange,
+                    config["freqai"]["train_period_days"],
+                    config["freqai"]["backtest_period_days"],
+                )

        self.data['extra_returns_per_train'] = self.freqai_config.get('extra_returns_per_train', {})
        if not self.freqai_config.get("data_kitchen_thread_count", 0):
@@ -103,6 +109,7 @@ class FreqaiDataKitchen:
        self.train_dates: DataFrame = pd.DataFrame()
        self.unique_classes: Dict[str, list] = {}
        self.unique_class_list: list = []
        self.backtest_live_models_data: Dict[str, Any] = {}

    def set_paths(
        self,
@@ -114,10 +121,7 @@ class FreqaiDataKitchen:
        :param metadata: dict = strategy furnished pair metadata
        :param trained_timestamp: int = timestamp of most recent training
        """
-        self.full_path = Path(
-            self.config["user_data_dir"] / "models" / str(self.freqai_config.get("identifier"))
-        )
-
+        self.full_path = self.get_full_models_path(self.config)
        self.data_path = Path(
            self.full_path
            / f"sub-train-{pair.split('/')[0]}_{trained_timestamp}"
@@ -248,7 +252,7 @@ class FreqaiDataKitchen:
            self.data["filter_drop_index_training"] = drop_index

        else:
-            if len(self.data['constant_features_list']):
+            if 'constant_features_list' in self.data and len(self.data['constant_features_list']):
                filtered_df = self.check_pred_labels(filtered_df)
            # we are backtesting so we need to preserve row number to send back to strategy,
            # so now we use do_predict to avoid any prediction based on a NaN
@@ -459,6 +463,29 @@ class FreqaiDataKitchen:
        # print(tr_training_list, tr_backtesting_list)
        return tr_training_list_timerange, tr_backtesting_list_timerange

    def split_timerange_live_models(
        self
    ) -> Tuple[list, list]:

        tr_backtesting_list_timerange = []
        asset = self.pair.split("/")[0]
        if asset not in self.backtest_live_models_data["assets_end_dates"]:
            raise OperationalException(
                f"Model not available for pair {self.pair}. "
                "Please, try again after removing this pair from the configuration file."
            )
        asset_data = self.backtest_live_models_data["assets_end_dates"][asset]
        backtesting_timerange = self.backtest_live_models_data["backtesting_timerange"]
        model_end_dates = [x for x in asset_data]
        model_end_dates.append(backtesting_timerange.stopts)
        model_end_dates.sort()
        for index, item in enumerate(model_end_dates):
            if len(model_end_dates) > (index + 1):
                tr_to_add = TimeRange("date", "date", item, model_end_dates[index + 1])
                tr_backtesting_list_timerange.append(tr_to_add)

        return tr_backtesting_list_timerange, tr_backtesting_list_timerange

    def slice_dataframe(self, timerange: TimeRange, df: DataFrame) -> DataFrame:
        """
        Given a full dataframe, extract the user desired window
@@ -966,11 +993,13 @@ class FreqaiDataKitchen:
            append_df[label] = predictions[label]
            if append_df[label].dtype == object:
                continue
-            append_df[f"{label}_mean"] = self.data["labels_mean"][label]
-            append_df[f"{label}_std"] = self.data["labels_std"][label]
+            if "labels_mean" in self.data:
+                append_df[f"{label}_mean"] = self.data["labels_mean"][label]
+            if "labels_std" in self.data:
+                append_df[f"{label}_std"] = self.data["labels_std"][label]

        for extra_col in self.data["extra_returns_per_train"]:
-            append_df["{extra_col}"] = self.data["extra_returns_per_train"][extra_col]
+            append_df[f"{extra_col}"] = self.data["extra_returns_per_train"][extra_col]

        append_df["do_predict"] = do_predict
        if self.freqai_config["feature_parameters"].get("DI_threshold", 0) > 0:
@@ -1035,11 +1064,6 @@ class FreqaiDataKitchen:
        start = datetime.fromtimestamp(backtest_timerange.startts, tz=timezone.utc)
        stop = datetime.fromtimestamp(backtest_timerange.stopts, tz=timezone.utc)
        full_timerange = start.strftime("%Y%m%d") + "-" + stop.strftime("%Y%m%d")

-        self.full_path = Path(
-            self.config["user_data_dir"] / "models" / f"{self.freqai_config['identifier']}"
-        )
-
        config_path = Path(self.config["config_files"][0])

        if not self.full_path.is_dir():
@@ -1122,15 +1146,15 @@ class FreqaiDataKitchen:

        return retrain, trained_timerange, data_load_timerange

-    def set_new_model_names(self, pair: str, trained_timerange: TimeRange):
+    def set_new_model_names(self, pair: str, timestamp_id: int):

        coin, _ = pair.split("/")
        self.data_path = Path(
            self.full_path
-            / f"sub-train-{pair.split('/')[0]}_{int(trained_timerange.stopts)}"
+            / f"sub-train-{pair.split('/')[0]}_{timestamp_id}"
        )

-        self.model_filename = f"cb_{coin.lower()}_{int(trained_timerange.stopts)}"
+        self.model_filename = f"cb_{coin.lower()}_{timestamp_id}"

    def set_all_pairs(self) -> None:
@@ -1141,6 +1165,54 @@ class FreqaiDataKitchen:
            if pair not in self.all_pairs:
                self.all_pairs.append(pair)

    def extract_corr_pair_columns_from_populated_indicators(
        self,
        dataframe: DataFrame
    ) -> Dict[str, DataFrame]:
        """
        Find the columns of the dataframe corresponding to the corr_pairlist, save them
        in a dictionary to be reused and attached to other pairs.

        :param dataframe: fully populated dataframe (current pair + corr_pairs)
        :return: corr_dataframes, dictionary of dataframes to be attached
                 to other pairs in same candle.
        """
        corr_dataframes: Dict[str, DataFrame] = {}
        pairs = self.freqai_config["feature_parameters"].get("include_corr_pairlist", [])

        for pair in pairs:
            pair = pair.replace(':', '')  # lightgbm doesnt like colons
            valid_strs = [f"%-{pair}", f"%{pair}", f"%_{pair}"]
            pair_cols = [col for col in dataframe.columns if
                         any(substr in col for substr in valid_strs)]
            if pair_cols:
                pair_cols.insert(0, 'date')
                corr_dataframes[pair] = dataframe.filter(pair_cols, axis=1)

        return corr_dataframes

    def attach_corr_pair_columns(self, dataframe: DataFrame,
                                 corr_dataframes: Dict[str, DataFrame],
                                 current_pair: str) -> DataFrame:
        """
        Attach the existing corr_pair dataframes to the current pair dataframe before training

        :param dataframe: current pair strategy dataframe, indicators populated already
        :param corr_dataframes: dictionary of saved dataframes from earlier in the same candle
        :param current_pair: current pair to which we will attach corr pair dataframe
        :return:
        :dataframe: current pair dataframe of populated indicators, concatenated with corr_pairs
                ready for training
        """
        pairs = self.freqai_config["feature_parameters"].get("include_corr_pairlist", [])
        current_pair = current_pair.replace(':', '')
        for pair in pairs:
            pair = pair.replace(':', '')  # lightgbm doesnt work with colons
            if current_pair != pair:
                dataframe = dataframe.merge(corr_dataframes[pair], how='left', on='date')

        return dataframe

    def use_strategy_to_populate_indicators(
        self,
        strategy: IStrategy,
@@ -1148,6 +1220,7 @@ class FreqaiDataKitchen:
        base_dataframes: dict = {},
        pair: str = "",
        prediction_dataframe: DataFrame = pd.DataFrame(),
        do_corr_pairs: bool = True,
    ) -> DataFrame:
        """
        Use the user defined strategy for populating indicators during retrain
@@ -1157,15 +1230,15 @@ class FreqaiDataKitchen:
        :param base_dataframes: dict = dict containing the current pair dataframes
                                (for user defined timeframes)
        :param metadata: dict = strategy furnished pair metadata
-        :returns:
+        :return:
        dataframe: DataFrame = dataframe containing populated indicators
        """

        # for prediction dataframe creation, we let dataprovider handle everything in the strategy
        # so we create empty dictionaries, which allows us to pass None to
        # `populate_any_indicators()`. Signaling we want the dp to give us the live dataframe.
-        tfs = self.freqai_config["feature_parameters"].get("include_timeframes")
-        pairs = self.freqai_config["feature_parameters"].get("include_corr_pairlist", [])
+        tfs: List[str] = self.freqai_config["feature_parameters"].get("include_timeframes")
+        pairs: List[str] = self.freqai_config["feature_parameters"].get("include_corr_pairlist", [])
        if not prediction_dataframe.empty:
            dataframe = prediction_dataframe.copy()
            for tf in tfs:
@@ -1188,19 +1261,24 @@ class FreqaiDataKitchen:
                    informative=base_dataframes[tf],
                    set_generalized_indicators=sgi
                )
-        if pairs:
-            for i in pairs:
-                if pair in i:
-                    continue  # dont repeat anything from whitelist

+        # ensure corr pairs are always last
+        for corr_pair in pairs:
+            if pair == corr_pair:
+                continue  # dont repeat anything from whitelist
            for tf in tfs:
+                if pairs and do_corr_pairs:
                    dataframe = strategy.populate_any_indicators(
-                        i,
+                        corr_pair,
                        dataframe.copy(),
                        tf,
-                        informative=corr_dataframes[i][tf]
+                        informative=corr_dataframes[corr_pair][tf]
                    )

        self.get_unique_classes_from_labels(dataframe)

+        dataframe = self.remove_special_chars_from_feature_names(dataframe)

        return dataframe

    def fit_labels(self) -> None:
@@ -1267,14 +1345,16 @@ class FreqaiDataKitchen:
        append_df = pd.read_hdf(self.backtesting_results_path)
        return append_df

-    def check_if_backtest_prediction_exists(
-        self
+    def check_if_backtest_prediction_is_valid(
+        self,
+        len_backtest_df: int
    ) -> bool:
        """
-        Check if a backtesting prediction already exists
-        :param dk: FreqaiDataKitchen
+        Check if a backtesting prediction already exists and if the predictions
+        to append have the same size as the backtesting dataframe slice
+        :param length_backtesting_dataframe: Length of backtesting dataframe slice
        :return:
-        :boolean: whether the prediction file exists or not.
+        :boolean: whether the prediction file is valid.
        """
        path_to_predictionfile = Path(self.full_path /
                                      self.backtest_predictions_folder /
@@ -1282,10 +1362,134 @@ class FreqaiDataKitchen:
        self.backtesting_results_path = path_to_predictionfile

        file_exists = path_to_predictionfile.is_file()

        if file_exists:
-            logger.info(f"Found backtesting prediction file at {path_to_predictionfile}")
+            append_df = self.get_backtesting_prediction()
+            if len(append_df) == len_backtest_df:
+                logger.info(f"Found backtesting prediction file at {path_to_predictionfile}")
+                return True
+            else:
+                logger.info("A new backtesting prediction file is required. "
+                            "(Number of predictions is different from dataframe length).")
+                return False
        else:
            logger.info(
                f"Could not find backtesting prediction file at {path_to_predictionfile}"
            )
-        return file_exists
+            return False

    def set_timerange_from_ready_models(self):
        backtesting_timerange, \
            assets_end_dates = (
                self.get_timerange_and_assets_end_dates_from_ready_models(self.full_path))

        self.backtest_live_models_data = {
            "backtesting_timerange": backtesting_timerange,
            "assets_end_dates": assets_end_dates
        }
        return

    def get_full_models_path(self, config: Config) -> Path:
        """
        Returns default FreqAI model path
        :param config: Configuration dictionary
        """
        freqai_config: Dict[str, Any] = config["freqai"]
        return Path(
            config["user_data_dir"] / "models" / str(freqai_config.get("identifier"))
        )

    def get_timerange_and_assets_end_dates_from_ready_models(
            self, models_path: Path) -> Tuple[TimeRange, Dict[str, Any]]:
        """
        Returns timerange information based on a FreqAI model directory
        :param models_path: FreqAI model path

        :return: a Tuple with (Timerange calculated from directory and
        a Dict with pair and model end training dates info)
        """
        all_models_end_dates = []
        assets_end_dates: Dict[str, Any] = self.get_assets_timestamps_training_from_ready_models(
            models_path)
        for key in assets_end_dates:
            for model_end_date in assets_end_dates[key]:
                if model_end_date not in all_models_end_dates:
                    all_models_end_dates.append(model_end_date)

        if len(all_models_end_dates) == 0:
            raise OperationalException(
                'At least 1 saved model is required to '
                'run backtest with the freqai-backtest-live-models option'
            )

        if len(all_models_end_dates) == 1:
            logger.warning(
                "Only 1 model was found. Backtesting will run with the "
                "timerange from the end of the training date to the current date"
            )

        finish_timestamp = int(datetime.now(tz=timezone.utc).timestamp())
        if len(all_models_end_dates) > 1:
            # After last model end date, use the same period from previous model
            # to finish the backtest
            all_models_end_dates.sort(reverse=True)
            finish_timestamp = all_models_end_dates[0] + \
                (all_models_end_dates[0] - all_models_end_dates[1])

        all_models_end_dates.append(finish_timestamp)
        all_models_end_dates.sort()
        start_date = (datetime(*datetime.fromtimestamp(min(all_models_end_dates),
                                                       timezone.utc).timetuple()[:3], tzinfo=timezone.utc))
        end_date = (datetime(*datetime.fromtimestamp(max(all_models_end_dates),
                                                     timezone.utc).timetuple()[:3], tzinfo=timezone.utc))

        # add 1 day to string timerange to ensure BT module will load all dataframe data
        end_date = end_date + timedelta(days=1)
        backtesting_timerange = TimeRange(
            'date', 'date', int(start_date.timestamp()), int(end_date.timestamp())
        )
        return backtesting_timerange, assets_end_dates

    def get_assets_timestamps_training_from_ready_models(
            self, models_path: Path) -> Dict[str, Any]:
        """
        Scan the models path and returns all assets end training dates (timestamp)
        :param models_path: FreqAI model path

        :return: a Dict with asset and model end training dates info
        """
        assets_end_dates: Dict[str, Any] = {}
        if not models_path.is_dir():
            raise OperationalException(
                'Model folders not found. Saved models are required '
                'to run backtest with the freqai-backtest-live-models option'
            )
        for model_dir in models_path.iterdir():
            if str(model_dir.name).startswith("sub-train"):
                model_end_date = int(model_dir.name.split("_")[1])
                asset = model_dir.name.split("_")[0].replace("sub-train-", "")
                model_file_name = (
                    f"cb_{str(model_dir.name).replace('sub-train-', '').lower()}"
                    "_model.joblib"
                )

                model_path_file = Path(model_dir / model_file_name)
                if model_path_file.is_file():
                    if asset not in assets_end_dates:
                        assets_end_dates[asset] = []
                    assets_end_dates[asset].append(model_end_date)

        return assets_end_dates

    def remove_special_chars_from_feature_names(self, dataframe: pd.DataFrame) -> pd.DataFrame:
        """
        Remove all special characters from feature strings (:)
        :param dataframe: the dataframe that just finished indicator population. (unfiltered)
        :return: dataframe with cleaned feature names
        """

        spec_chars = [':']
        for c in spec_chars:
            dataframe.columns = dataframe.columns.str.replace(c, "")

        return dataframe
@@ -5,8 +5,7 @@ from abc import ABC, abstractmethod
from collections import deque
from datetime import datetime, timezone
from pathlib import Path
from threading import Lock
-from typing import Any, Dict, List, Optional, Literal, Tuple
+from typing import Any, Dict, List, Literal, Optional, Tuple

import numpy as np
import pandas as pd
@@ -70,22 +69,23 @@ class IFreqaiModel(ABC):
        if self.save_backtest_models:
            logger.info('Backtesting module configured to save all models.')
        self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode)
        # set current candle to arbitrary historical date
        self.current_candle: datetime = datetime.fromtimestamp(637887600, tz=timezone.utc)
        self.dd.current_candle = self.current_candle
        self.scanning = False
        self.ft_params = self.freqai_info["feature_parameters"]
        self.corr_pairlist: List[str] = self.ft_params.get("include_corr_pairlist", [])
        self.keras: bool = self.freqai_info.get("keras", False)
        if self.keras and self.ft_params.get("DI_threshold", 0):
            self.ft_params["DI_threshold"] = 0
            logger.warning("DI threshold is not configured for Keras models yet. Deactivating.")
-        self.CONV_WIDTH = self.freqai_info.get("conv_width", 2)
+        self.CONV_WIDTH = self.freqai_info.get('conv_width', 1)
        if self.ft_params.get("inlier_metric_window", 0):
            self.CONV_WIDTH = self.ft_params.get("inlier_metric_window", 0) * 2
        self.pair_it = 0
        self.pair_it_train = 0
        self.total_pairs = len(self.config.get("exchange", {}).get("pair_whitelist"))
        self.train_queue = self._set_train_queue()
        self.last_trade_database_summary: DataFrame = {}
        self.current_trade_database_summary: DataFrame = {}
        self.analysis_lock = Lock()
        self.inference_time: float = 0
        self.train_time: float = 0
        self.begin_time: float = 0
@ -93,7 +93,10 @@ class IFreqaiModel(ABC):
|
|||
self.base_tf_seconds = timeframe_to_seconds(self.config['timeframe'])
|
||||
self.continual_learning = self.freqai_info.get('continual_learning', False)
|
||||
self.plot_features = self.ft_params.get("plot_feature_importances", 0)
|
||||
|
||||
self.corr_dataframes: Dict[str, DataFrame] = {}
|
||||
# get_corr_dataframes is controlling the caching of corr_dataframes
|
||||
# for improved performance. Careful with this boolean.
|
||||
self.get_corr_dataframes: bool = True
|
||||
self._threads: List[threading.Thread] = []
|
||||
self._stop_event = threading.Event()
|
||||
self.strategy: Optional[IStrategy] = None
|
||||
|
@ -140,7 +143,11 @@ class IFreqaiModel(ABC):
|
|||
# the concatenated results for the full backtesting period back to the strategy.
|
||||
elif not self.follow_mode:
|
||||
self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
|
||||
logger.info(f"Training {len(self.dk.training_timeranges)} timeranges")
|
||||
if self.dk.backtest_live_models:
|
||||
logger.info(
|
||||
f"Backtesting {len(self.dk.backtesting_timeranges)} timeranges (live models)")
|
||||
else:
|
||||
logger.info(f"Training {len(self.dk.training_timeranges)} timeranges")
|
||||
dataframe = self.dk.use_strategy_to_populate_indicators(
|
||||
strategy, prediction_dataframe=dataframe, pair=metadata["pair"]
|
||||
)
|
||||
|
@ -269,25 +276,20 @@ class IFreqaiModel(ABC):
|
|||
dataframe_train = dk.slice_dataframe(tr_train, dataframe)
|
||||
dataframe_backtest = dk.slice_dataframe(tr_backtest, dataframe)
|
||||
|
||||
trained_timestamp = tr_train
|
||||
tr_train_startts_str = datetime.fromtimestamp(
|
||||
tr_train.startts,
|
||||
tz=timezone.utc).strftime(DATETIME_PRINT_FORMAT)
|
||||
tr_train_stopts_str = datetime.fromtimestamp(
|
||||
tr_train.stopts,
|
||||
tz=timezone.utc).strftime(DATETIME_PRINT_FORMAT)
|
||||
logger.info(
|
||||
f"Training {pair}, {self.pair_it}/{self.total_pairs} pairs"
|
||||
f" from {tr_train_startts_str} to {tr_train_stopts_str}, {train_it}/{total_trains} "
|
||||
"trains"
|
||||
)
|
||||
if not self.ensure_data_exists(dataframe_backtest, tr_backtest, pair):
|
||||
continue
|
||||
|
||||
trained_timestamp_int = int(trained_timestamp.stopts)
|
||||
dk.set_paths(pair, trained_timestamp_int)
|
||||
self.log_backtesting_progress(tr_train, pair, train_it, total_trains)
|
||||
|
||||
dk.set_new_model_names(pair, trained_timestamp)
|
||||
timestamp_model_id = int(tr_train.stopts)
|
||||
if dk.backtest_live_models:
|
||||
timestamp_model_id = int(tr_backtest.startts)
|
||||
|
||||
if dk.check_if_backtest_prediction_exists():
|
||||
dk.set_paths(pair, timestamp_model_id)
|
||||
|
||||
dk.set_new_model_names(pair, timestamp_model_id)
|
||||
|
||||
if dk.check_if_backtest_prediction_is_valid(len(dataframe_backtest)):
|
||||
self.dd.load_metadata(dk)
|
||||
dk.find_features(dataframe_train)
|
||||
self.check_if_feature_list_matches_strategy(dk)
|
||||
|
@ -299,7 +301,7 @@ class IFreqaiModel(ABC):
|
|||
dk.find_labels(dataframe_train)
|
||||
self.model = self.train(dataframe_train, pair, dk)
|
||||
self.dd.pair_dict[pair]["trained_timestamp"] = int(
|
||||
trained_timestamp.stopts)
|
||||
tr_train.stopts)
|
||||
if self.plot_features:
|
||||
plot_feature_importance(self.model, pair, dk, self.plot_features)
|
||||
if self.save_backtest_models:
|
||||
|
@ -351,6 +353,7 @@ class IFreqaiModel(ABC):
|
|||
if self.dd.historic_data:
|
||||
self.dd.update_historic_data(strategy, dk)
|
||||
logger.debug(f'Updating historic data on pair {metadata["pair"]}')
|
||||
self.track_current_candle()
|
||||
|
||||
if not self.follow_mode:
|
||||
|
||||
|
@ -377,10 +380,10 @@ class IFreqaiModel(ABC):
|
|||
# load the model and associated data into the data kitchen
|
||||
self.model = self.dd.load_data(metadata["pair"], dk)
|
||||
|
||||
with self.analysis_lock:
|
||||
dataframe = self.dk.use_strategy_to_populate_indicators(
|
||||
strategy, prediction_dataframe=dataframe, pair=metadata["pair"]
|
||||
)
|
||||
dataframe = dk.use_strategy_to_populate_indicators(
|
||||
strategy, prediction_dataframe=dataframe, pair=metadata["pair"],
|
||||
do_corr_pairs=self.get_corr_dataframes
|
||||
)
|
||||
|
||||
if not self.model:
|
||||
logger.warning(
|
||||
|
@ -389,6 +392,9 @@ class IFreqaiModel(ABC):
|
|||
self.dd.return_null_values_to_strategy(dataframe, dk)
|
||||
return dk
|
||||
|
||||
if self.corr_pairlist:
|
||||
dataframe = self.cache_corr_pairlist_dfs(dataframe, dk)
|
||||
|
||||
dk.find_labels(dataframe)
|
||||
|
||||
self.build_strategy_return_arrays(dataframe, dk, metadata["pair"], trained_timestamp)
|
||||
|
@ -572,10 +578,9 @@ class IFreqaiModel(ABC):
|
|||
data_load_timerange, pair, dk
|
||||
)
|
||||
|
||||
with self.analysis_lock:
|
||||
unfiltered_dataframe = dk.use_strategy_to_populate_indicators(
|
||||
strategy, corr_dataframes, base_dataframes, pair
|
||||
)
|
||||
unfiltered_dataframe = dk.use_strategy_to_populate_indicators(
|
||||
strategy, corr_dataframes, base_dataframes, pair
|
||||
)
|
||||
|
||||
unfiltered_dataframe = dk.slice_dataframe(new_trained_timerange, unfiltered_dataframe)
|
||||
|
||||
|
@ -586,7 +591,7 @@ class IFreqaiModel(ABC):
|
|||
model = self.train(unfiltered_dataframe, pair, dk)
|
||||
|
||||
self.dd.pair_dict[pair]["trained_timestamp"] = new_trained_timerange.stopts
|
||||
dk.set_new_model_names(pair, new_trained_timerange)
|
||||
dk.set_new_model_names(pair, new_trained_timerange.stopts)
|
||||
self.dd.save_data(model, pair, dk)
|
||||
|
||||
if self.plot_features:
|
||||
|
@@ -751,6 +756,87 @@ class IFreqaiModel(ABC):
                    f'Best approximation queue: {best_queue}')
        return best_queue

    def cache_corr_pairlist_dfs(self, dataframe: DataFrame, dk: FreqaiDataKitchen) -> DataFrame:
        """
        Cache the corr_pairlist dfs to speed up performance for subsequent pairs during the
        current candle.
        :param dataframe: strategy fed dataframe
        :param dk: datakitchen object for current asset
        :return: dataframe to attach/extract cached corr_pair dfs to/from.
        """

        if self.get_corr_dataframes:
            self.corr_dataframes = dk.extract_corr_pair_columns_from_populated_indicators(dataframe)
            if not self.corr_dataframes:
                logger.warning("Couldn't cache corr_pair dataframes for improved performance. "
                               "Consider ensuring that the full coin/stake, e.g. XYZ/USD, "
                               "is included in the column names when you are creating features "
                               "in `populate_any_indicators()`.")
            self.get_corr_dataframes = not bool(self.corr_dataframes)
        elif self.corr_dataframes:
            dataframe = dk.attach_corr_pair_columns(
                dataframe, self.corr_dataframes, dk.pair)

        return dataframe

    def track_current_candle(self):
        """
        Checks if the latest candle appended by the datadrawer is
        equivalent to the latest candle seen by FreqAI. If not, it
        asks to refresh the cached corr_dfs, and resets the pair
        counter.
        """
        if self.dd.current_candle > self.current_candle:
            self.get_corr_dataframes = True
            self.pair_it = 1
            self.current_candle = self.dd.current_candle
|
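The corr_pair caching above boils down to: compute the correlated-pair columns once for the first pair of a new candle, then re-attach the cached columns for every other pair until the next candle arrives. A stripped-down illustration of that pattern (illustrative names only, not the FreqAI API):

from typing import Callable, Dict, Optional

class OncePerCandleCache:
    # Illustrative only: recompute when the candle timestamp changes, otherwise reuse.
    def __init__(self) -> None:
        self.cached: Optional[Dict[str, list]] = None
        self.last_candle: Optional[int] = None

    def get(self, candle_ts: int, compute: Callable[[], Dict[str, list]]) -> Dict[str, list]:
        if self.cached is None or candle_ts != self.last_candle:
            self.cached = compute()
            self.last_candle = candle_ts
        return self.cached

calls = 0

def expensive_corr_columns() -> Dict[str, list]:
    global calls
    calls += 1
    return {"%-BTC/USDT-close": [1.0, 2.0]}

cache = OncePerCandleCache()
for _pair in ("ETH/USDT", "XRP/USDT", "ADA/USDT"):
    cache.get(1516406400, expensive_corr_columns)

print(calls)  # 1 -> computed for the first pair only, reused for the rest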
||||
|
||||
    def ensure_data_exists(self, dataframe_backtest: DataFrame,
                           tr_backtest: TimeRange, pair: str) -> bool:
        """
        Check if the backtesting dataframe is empty; if it is, report useful information
        to the user.
        :param dataframe_backtest: the backtesting dataframe, maybe empty.
        :param tr_backtest: current backtesting timerange.
        :param pair: current pair
        :return: if the data exists or not
        """
        if self.config.get("freqai_backtest_live_models", False) and len(dataframe_backtest) == 0:
            tr_backtest_startts_str = datetime.fromtimestamp(
                tr_backtest.startts,
                tz=timezone.utc).strftime(DATETIME_PRINT_FORMAT)
            tr_backtest_stopts_str = datetime.fromtimestamp(
                tr_backtest.stopts,
                tz=timezone.utc).strftime(DATETIME_PRINT_FORMAT)
            logger.info(f"No data found for pair {pair} from "
                        f"{tr_backtest_startts_str} to {tr_backtest_stopts_str}. "
                        "Probably more than one training within the same candle period.")
            return False
        return True

    def log_backtesting_progress(self, tr_train: TimeRange, pair: str,
                                 train_it: int, total_trains: int):
        """
        Log the backtesting progress so the user knows how many pairs have been trained and
        how many more pairs/trains remain.
        :param tr_train: the training timerange
        :param train_it: the train iteration for the current pair (the sliding window progress)
        :param pair: the current pair
        :param total_trains: total trains (total number of slides for the sliding window)
        """
        tr_train_startts_str = datetime.fromtimestamp(
            tr_train.startts,
            tz=timezone.utc).strftime(DATETIME_PRINT_FORMAT)
        tr_train_stopts_str = datetime.fromtimestamp(
            tr_train.stopts,
            tz=timezone.utc).strftime(DATETIME_PRINT_FORMAT)

        if not self.config.get("freqai_backtest_live_models", False):
            logger.info(
                f"Training {pair}, {self.pair_it}/{self.total_pairs} pairs"
                f" from {tr_train_startts_str} "
                f"to {tr_train_stopts_str}, {train_it}/{total_trains} "
                "trains"
            )
    # Following methods which are overridden by user-made prediction models.
    # See freqai/prediction_models/CatboostPredictionModel.py for an example.
|
||||
|
||||
|
|
|
@@ -0,0 +1,74 @@
import logging
import sys
from pathlib import Path
from typing import Any, Dict

from catboost import CatBoostClassifier, Pool

from freqtrade.freqai.base_models.BaseClassifierModel import BaseClassifierModel
from freqtrade.freqai.base_models.FreqaiMultiOutputClassifier import FreqaiMultiOutputClassifier
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen


logger = logging.getLogger(__name__)


class CatboostClassifierMultiTarget(BaseClassifierModel):
    """
    User created prediction model. The class needs to override three necessary
    functions, predict(), train(), fit(). The class inherits ModelHandler which
    has its own DataHandler where data is held, saved, loaded, and managed.
    """

    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
        """
        User sets up the training and test data to fit their desired model here
        :param data_dictionary: the dictionary constructed by DataHandler to hold
                                all the training and test data/labels.
        """

        cbc = CatBoostClassifier(
            allow_writing_files=True,
            loss_function='MultiClass',
            train_dir=Path(dk.data_path),
            **self.model_training_parameters,
        )

        X = data_dictionary["train_features"]
        y = data_dictionary["train_labels"]

        sample_weight = data_dictionary["train_weights"]

        eval_sets = [None] * y.shape[1]

        if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
            eval_sets = [None] * data_dictionary['test_labels'].shape[1]

            for i in range(data_dictionary['test_labels'].shape[1]):
                eval_sets[i] = Pool(
                    data=data_dictionary["test_features"],
                    label=data_dictionary["test_labels"].iloc[:, i],
                    weight=data_dictionary["test_weights"],
                )

        init_model = self.get_init_model(dk.pair)

        if init_model:
            init_models = init_model.estimators_
        else:
            init_models = [None] * y.shape[1]

        fit_params = []
        for i in range(len(eval_sets)):
            fit_params.append({
                'eval_set': eval_sets[i], 'init_model': init_models[i],
                'log_cout': sys.stdout, 'log_cerr': sys.stderr,
            })

        model = FreqaiMultiOutputClassifier(estimator=cbc)
        thread_training = self.freqai_info.get('multitarget_parallel_training', False)
        if thread_training:
            model.n_jobs = y.shape[1]
        model.fit(X=X, y=y, sample_weight=sample_weight, fit_params=fit_params)

        return model
|
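A sketch of the configuration keys this model reads, shown here as a Python dict rather than the JSON config file. The key names follow the code above and the usual freqai config section, but the values are illustrative and `iterations` is merely an example CatBoost parameter:

config_excerpt = {
    "freqaimodel": "CatboostClassifierMultiTarget",
    "freqai": {
        # Read in fit() above: fit one estimator per target column in parallel threads
        "multitarget_parallel_training": True,
        "data_split_parameters": {"test_size": 0.1},
        "model_training_parameters": {"iterations": 100},
    },
}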
|
@@ -0,0 +1,64 @@
import logging
from typing import Any, Dict

from lightgbm import LGBMClassifier

from freqtrade.freqai.base_models.BaseClassifierModel import BaseClassifierModel
from freqtrade.freqai.base_models.FreqaiMultiOutputClassifier import FreqaiMultiOutputClassifier
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen


logger = logging.getLogger(__name__)


class LightGBMClassifierMultiTarget(BaseClassifierModel):
    """
    User created prediction model. The class needs to override three necessary
    functions, predict(), train(), fit(). The class inherits ModelHandler which
    has its own DataHandler where data is held, saved, loaded, and managed.
    """

    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
        """
        User sets up the training and test data to fit their desired model here
        :param data_dictionary: the dictionary constructed by DataHandler to hold
                                all the training and test data/labels.
        """

        lgb = LGBMClassifier(**self.model_training_parameters)

        X = data_dictionary["train_features"]
        y = data_dictionary["train_labels"]
        sample_weight = data_dictionary["train_weights"]

        eval_weights = None
        eval_sets = [None] * y.shape[1]

        if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
            eval_weights = [data_dictionary["test_weights"]]
            eval_sets = [(None, None)] * data_dictionary['test_labels'].shape[1]  # type: ignore
            for i in range(data_dictionary['test_labels'].shape[1]):
                eval_sets[i] = (  # type: ignore
                    data_dictionary["test_features"],
                    data_dictionary["test_labels"].iloc[:, i]
                )

        init_model = self.get_init_model(dk.pair)
        if init_model:
            init_models = init_model.estimators_
        else:
            init_models = [None] * y.shape[1]

        fit_params = []
        for i in range(len(eval_sets)):
            fit_params.append(
                {'eval_set': eval_sets[i], 'eval_sample_weight': eval_weights,
                 'init_model': init_models[i]})

        model = FreqaiMultiOutputClassifier(estimator=lgb)
        thread_training = self.freqai_info.get('multitarget_parallel_training', False)
        if thread_training:
            model.n_jobs = y.shape[1]
        model.fit(X=X, y=y, sample_weight=sample_weight, fit_params=fit_params)

        return model
|
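Both multi-target classifiers expect one label column per target, and the wrapper fits one estimator per column. As a reminder of what that looks like on the strategy side, here is a minimal sketch mirroring the test strategy added further below (column names and the shift window are illustrative):

import numpy as np
import pandas as pd

def add_multi_targets(df: pd.DataFrame) -> pd.DataFrame:
    # Two categorical '&'-prefixed targets; one classifier is fitted per column.
    df['&s-up_or_down'] = np.where(df["close"].shift(-50) > df["close"], 'up', 'down')
    df['&s-up_or_down2'] = np.where(df["close"].shift(-50) > df["close"], 'up2', 'down2')
    return df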
|
@@ -218,3 +218,19 @@ def record_params(config: Dict[str, Any], full_path: Path) -> None:
        default=str,
        number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN
    )


def get_timerange_backtest_live_models(config: Config) -> str:
    """
    Returns a formatted timerange for backtest live/ready models
    :param config: Configuration dictionary

    :return: a string timerange (format example: '20220801-20220822')
    """
    dk = FreqaiDataKitchen(config)
    models_path = dk.get_full_models_path(config)
    timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path)
    start_date = datetime.fromtimestamp(timerange.startts, tz=timezone.utc)
    end_date = datetime.fromtimestamp(timerange.stopts, tz=timezone.utc)
    tr = f"{start_date.strftime('%Y%m%d')}-{end_date.strftime('%Y%m%d')}"
    return tr
|
||||
|
|
|
@ -134,6 +134,10 @@ class Backtesting:
|
|||
self.fee = self.exchange.get_fee(symbol=self.pairlists.whitelist[0])
|
||||
self.precision_mode = self.exchange.precisionMode
|
||||
|
||||
if self.config.get('freqai_backtest_live_models', False):
|
||||
from freqtrade.freqai.utils import get_timerange_backtest_live_models
|
||||
self.config['timerange'] = get_timerange_backtest_live_models(self.config)
|
||||
|
||||
self.timerange = TimeRange.parse_timerange(
|
||||
None if self.config.get('timerange') is None else str(self.config.get('timerange')))
|
||||
|
||||
|
|
|
@ -667,7 +667,7 @@ class LocalTrade():
|
|||
self.close(order.safe_price)
|
||||
else:
|
||||
self.recalc_trade_from_orders()
|
||||
elif order.ft_order_side == 'stoploss':
|
||||
elif order.ft_order_side == 'stoploss' and order.status not in ('canceled', 'open'):
|
||||
self.stoploss_order_id = None
|
||||
self.close_rate_requested = self.stop_loss
|
||||
self.exit_reason = ExitType.STOPLOSS_ON_EXCHANGE.value
|
||||
|
|
|
@ -36,7 +36,6 @@ class IPairList(LoggingMixin, ABC):
|
|||
self._pairlistconfig = pairlistconfig
|
||||
self._pairlist_pos = pairlist_pos
|
||||
self.refresh_period = self._pairlistconfig.get('refresh_period', 1800)
|
||||
self._last_refresh = 0
|
||||
LoggingMixin.__init__(self, logger, self.refresh_period)
|
||||
|
||||
@property
|
||||
|
|
|
@ -3,16 +3,20 @@ Shuffle pair list filter
|
|||
"""
|
||||
import logging
|
||||
import random
|
||||
from typing import Any, Dict, List
|
||||
from typing import Any, Dict, List, Literal
|
||||
|
||||
from freqtrade.constants import Config
|
||||
from freqtrade.enums import RunMode
|
||||
from freqtrade.exchange import timeframe_to_seconds
|
||||
from freqtrade.exchange.types import Tickers
|
||||
from freqtrade.plugins.pairlist.IPairList import IPairList
|
||||
from freqtrade.util.periodic_cache import PeriodicCache
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
ShuffleValues = Literal['candle', 'iteration']
|
||||
|
||||
|
||||
class ShuffleFilter(IPairList):
|
||||
|
||||
|
@@ -31,6 +35,9 @@ class ShuffleFilter(IPairList):
            logger.info(f"Backtesting mode detected, applying seed value: {self._seed}")

        self._random = random.Random(self._seed)
        self._shuffle_freq: ShuffleValues = pairlistconfig.get('shuffle_frequency', 'candle')
        self.__pairlist_cache = PeriodicCache(
            maxsize=1000, ttl=timeframe_to_seconds(self._config['timeframe']))

    @property
    def needstickers(self) -> bool:
@@ -45,7 +52,7 @@ class ShuffleFilter(IPairList):
        """
        Short whitelist method description - used for startup-messages
        """
        return (f"{self.name} - Shuffling pairs" +
        return (f"{self.name} - Shuffling pairs every {self._shuffle_freq}" +
                (f", seed = {self._seed}." if self._seed is not None else "."))

    def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]:
@@ -56,7 +63,13 @@ class ShuffleFilter(IPairList):
        :param tickers: Tickers (from exchange.get_tickers). May be cached.
        :return: new whitelist
        """
        pairlist_bef = tuple(pairlist)
        pairlist_new = self.__pairlist_cache.get(pairlist_bef)
        if pairlist_new and self._shuffle_freq == 'candle':
            # Use cached pairlist.
            return pairlist_new
        # Shuffle is done inplace
        self._random.shuffle(pairlist)
        self.__pairlist_cache[pairlist_bef] = pairlist

        return pairlist
|
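For reference, the new option is read straight from the pairlist entry. A configuration sketch, expressed as a Python literal rather than the JSON config file; values are illustrative:

pairlists = [
    {"method": "StaticPairList"},
    # 'candle' (the default) re-shuffles once per candle and serves the cached order in between;
    # 'iteration' shuffles on every pairlist refresh.
    {"method": "ShuffleFilter", "shuffle_frequency": "candle", "seed": 42},
]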
||||
|
|
|
@ -127,13 +127,6 @@ async def message_endpoint(
|
|||
except Exception as e:
|
||||
logger.info(f"Consumer connection failed - {channel}: {e}")
|
||||
logger.debug(e, exc_info=e)
|
||||
finally:
|
||||
await channel_manager.on_disconnect(ws)
|
||||
|
||||
else:
|
||||
if channel:
|
||||
await channel_manager.on_disconnect(ws)
|
||||
await ws.close()
|
||||
|
||||
except RuntimeError:
|
||||
# WebSocket was closed
|
||||
|
@ -144,4 +137,5 @@ async def message_endpoint(
|
|||
# Log tracebacks to keep track of what errors are happening
|
||||
logger.exception(e)
|
||||
finally:
|
||||
await channel_manager.on_disconnect(ws)
|
||||
if channel:
|
||||
await channel_manager.on_disconnect(ws)
|
||||
|
|
|
@ -197,6 +197,7 @@ class ApiServer(RPCHandler):
|
|||
# Get data from queue
|
||||
message: WSMessageSchemaType = await async_queue.get()
|
||||
logger.debug(f"Found message of type: {message.get('type')}")
|
||||
async_queue.task_done()
|
||||
# Broadcast it
|
||||
await self._ws_channel_manager.broadcast(message)
|
||||
except asyncio.CancelledError:
|
||||
|
@ -210,6 +211,9 @@ class ApiServer(RPCHandler):
|
|||
# Disconnect channels and stop the loop on cancel
|
||||
await self._ws_channel_manager.disconnect_all()
|
||||
self._ws_loop.stop()
|
||||
# Avoid adding more items to the queue if they aren't
|
||||
# going to get broadcasted.
|
||||
self._ws_queue = None
|
||||
|
||||
def start_api(self):
|
||||
"""
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
import asyncio
|
||||
import logging
|
||||
import time
|
||||
from threading import RLock
|
||||
from typing import Any, Dict, List, Optional, Type, Union
|
||||
from uuid import uuid4
|
||||
|
@ -46,7 +47,7 @@ class WebSocketChannel:
|
|||
self._relay_task = asyncio.create_task(self.relay())
|
||||
|
||||
# Internal event to signify a closed websocket
|
||||
self._closed = False
|
||||
self._closed = asyncio.Event()
|
||||
|
||||
# Wrap the WebSocket in the Serializing class
|
||||
self._wrapped_ws = self._serializer_cls(self._websocket)
|
||||
|
@ -73,15 +74,26 @@ class WebSocketChannel:
|
|||
Add the data to the queue to be sent.
|
||||
:returns: True if data added to queue, False otherwise
|
||||
"""
|
||||
|
||||
# This block only runs if the queue is full, it will wait
|
||||
# until self.drain_timeout for the relay to drain the outgoing queue
|
||||
# We can't use asyncio.wait_for here because the queue may have been created with a
|
||||
# different eventloop
|
||||
start = time.time()
|
||||
while self.queue.full():
|
||||
await asyncio.sleep(1)
|
||||
if (time.time() - start) > self.drain_timeout:
|
||||
return False
|
||||
|
||||
# If for some reason the queue is still full, just return False
|
||||
try:
|
||||
await asyncio.wait_for(
|
||||
self.queue.put(data),
|
||||
timeout=self.drain_timeout
|
||||
)
|
||||
return True
|
||||
except asyncio.TimeoutError:
|
||||
self.queue.put_nowait(data)
|
||||
except asyncio.QueueFull:
|
||||
return False
|
||||
|
||||
# If we got here everything is ok
|
||||
return True
|
||||
|
||||
async def recv(self):
|
||||
"""
|
||||
Receive data on the wrapped websocket
|
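The reworked send() above relies on a standard asyncio idiom: try to enqueue with a timeout and report failure instead of blocking forever when the consumer is not draining. A self-contained sketch of just that idiom (not the freqtrade channel class itself):

import asyncio

async def try_send(queue: "asyncio.Queue", data, drain_timeout: float = 0.1) -> bool:
    try:
        # Wait at most drain_timeout for room in the bounded queue
        await asyncio.wait_for(queue.put(data), timeout=drain_timeout)
        return True
    except asyncio.TimeoutError:
        return False

async def main():
    q: asyncio.Queue = asyncio.Queue(maxsize=1)
    print(await try_send(q, "first"))   # True  - there was room
    print(await try_send(q, "second"))  # False - queue full and nothing draining it

asyncio.run(main())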
||||
|
@ -99,14 +111,19 @@ class WebSocketChannel:
|
|||
Close the WebSocketChannel
|
||||
"""
|
||||
|
||||
self._closed = True
|
||||
try:
|
||||
await self.raw_websocket.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
self._closed.set()
|
||||
self._relay_task.cancel()
|
||||
|
||||
def is_closed(self) -> bool:
|
||||
"""
|
||||
Closed flag
|
||||
"""
|
||||
return self._closed
|
||||
return self._closed.is_set()
|
||||
|
||||
def set_subscriptions(self, subscriptions: List[str] = []) -> None:
|
||||
"""
|
||||
|
@ -129,7 +146,7 @@ class WebSocketChannel:
|
|||
Relay messages from the channel's queue and send them out. This is started
|
||||
as a task.
|
||||
"""
|
||||
while True:
|
||||
while not self._closed.is_set():
|
||||
message = await self.queue.get()
|
||||
try:
|
||||
await self._send(message)
|
||||
|
|
|
@ -264,10 +264,10 @@ class ExternalMessageConsumer:
|
|||
# We haven't received data yet. Check the connection and continue.
|
||||
try:
|
||||
# ping
|
||||
ping = await channel.ping()
|
||||
pong = await channel.ping()
|
||||
latency = (await asyncio.wait_for(pong, timeout=self.ping_timeout) * 1000)
|
||||
|
||||
await asyncio.wait_for(ping, timeout=self.ping_timeout)
|
||||
logger.debug(f"Connection to {channel} still alive...")
|
||||
logger.info(f"Connection to {channel} still alive, latency: {latency}ms")
|
||||
|
||||
continue
|
||||
except (websockets.exceptions.ConnectionClosed):
|
||||
|
@ -276,7 +276,7 @@ class ExternalMessageConsumer:
|
|||
await asyncio.sleep(self.sleep_time)
|
||||
break
|
||||
except Exception as e:
|
||||
logger.warning(f"Ping error {channel} - retrying in {self.sleep_time}s")
|
||||
logger.warning(f"Ping error {channel} - {e} - retrying in {self.sleep_time}s")
|
||||
logger.debug(e, exc_info=e)
|
||||
await asyncio.sleep(self.sleep_time)
|
||||
|
||||
|
|
|
@ -110,8 +110,6 @@ class FreqaiExampleHybridStrategy(IStrategy):
|
|||
:param informative: the dataframe associated with the informative pair
|
||||
"""
|
||||
|
||||
coin = pair.split('/')[0]
|
||||
|
||||
if informative is None:
|
||||
informative = self.dp.get_pair_dataframe(pair, tf)
|
||||
|
||||
|
@ -119,13 +117,13 @@ class FreqaiExampleHybridStrategy(IStrategy):
|
|||
for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]:
|
||||
|
||||
t = int(t)
|
||||
informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
|
||||
informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
|
||||
informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, timeperiod=t)
|
||||
informative[f"%-{coin}sma-period_{t}"] = ta.SMA(informative, timeperiod=t)
|
||||
informative[f"%-{coin}ema-period_{t}"] = ta.EMA(informative, timeperiod=t)
|
||||
informative[f"%-{coin}roc-period_{t}"] = ta.ROC(informative, timeperiod=t)
|
||||
informative[f"%-{coin}relative_volume-period_{t}"] = (
|
||||
informative[f"%-{pair}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
|
||||
informative[f"%-{pair}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
|
||||
informative[f"%-{pair}adx-period_{t}"] = ta.ADX(informative, timeperiod=t)
|
||||
informative[f"%-{pair}sma-period_{t}"] = ta.SMA(informative, timeperiod=t)
|
||||
informative[f"%-{pair}ema-period_{t}"] = ta.EMA(informative, timeperiod=t)
|
||||
informative[f"%-{pair}roc-period_{t}"] = ta.ROC(informative, timeperiod=t)
|
||||
informative[f"%-{pair}relative_volume-period_{t}"] = (
|
||||
informative["volume"] / informative["volume"].rolling(t).mean()
|
||||
)
|
||||
|
||||
|
|
|
@ -53,7 +53,7 @@ class FreqaiExampleStrategy(IStrategy):
|
|||
"""
|
||||
Function designed to automatically generate, name and merge features
|
||||
from user indicated timeframes in the configuration file. User controls the indicators
|
||||
passed to the training/prediction by prepending indicators with `'%-' + coin `
|
||||
passed to the training/prediction by prepending indicators with `f'%-{pair}`
|
||||
(see convention below). I.e. user should not prepend any supporting metrics
|
||||
(e.g. bb_lowerband below) with % unless they explicitly want to pass that metric to the
|
||||
model.
|
||||
|
@ -63,8 +63,6 @@ class FreqaiExampleStrategy(IStrategy):
|
|||
:param informative: the dataframe associated with the informative pair
|
||||
"""
|
||||
|
||||
coin = pair.split('/')[0]
|
||||
|
||||
if informative is None:
|
||||
informative = self.dp.get_pair_dataframe(pair, tf)
|
||||
|
||||
|
@ -72,36 +70,36 @@ class FreqaiExampleStrategy(IStrategy):
|
|||
for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]:
|
||||
|
||||
t = int(t)
|
||||
informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
|
||||
informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
|
||||
informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, timeperiod=t)
|
||||
informative[f"%-{coin}sma-period_{t}"] = ta.SMA(informative, timeperiod=t)
|
||||
informative[f"%-{coin}ema-period_{t}"] = ta.EMA(informative, timeperiod=t)
|
||||
informative[f"%-{pair}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
|
||||
informative[f"%-{pair}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
|
||||
informative[f"%-{pair}adx-period_{t}"] = ta.ADX(informative, timeperiod=t)
|
||||
informative[f"%-{pair}sma-period_{t}"] = ta.SMA(informative, timeperiod=t)
|
||||
informative[f"%-{pair}ema-period_{t}"] = ta.EMA(informative, timeperiod=t)
|
||||
|
||||
bollinger = qtpylib.bollinger_bands(
|
||||
qtpylib.typical_price(informative), window=t, stds=2.2
|
||||
)
|
||||
informative[f"{coin}bb_lowerband-period_{t}"] = bollinger["lower"]
|
||||
informative[f"{coin}bb_middleband-period_{t}"] = bollinger["mid"]
|
||||
informative[f"{coin}bb_upperband-period_{t}"] = bollinger["upper"]
|
||||
informative[f"{pair}bb_lowerband-period_{t}"] = bollinger["lower"]
|
||||
informative[f"{pair}bb_middleband-period_{t}"] = bollinger["mid"]
|
||||
informative[f"{pair}bb_upperband-period_{t}"] = bollinger["upper"]
|
||||
|
||||
informative[f"%-{coin}bb_width-period_{t}"] = (
|
||||
informative[f"{coin}bb_upperband-period_{t}"]
|
||||
- informative[f"{coin}bb_lowerband-period_{t}"]
|
||||
) / informative[f"{coin}bb_middleband-period_{t}"]
|
||||
informative[f"%-{coin}close-bb_lower-period_{t}"] = (
|
||||
informative["close"] / informative[f"{coin}bb_lowerband-period_{t}"]
|
||||
informative[f"%-{pair}bb_width-period_{t}"] = (
|
||||
informative[f"{pair}bb_upperband-period_{t}"]
|
||||
- informative[f"{pair}bb_lowerband-period_{t}"]
|
||||
) / informative[f"{pair}bb_middleband-period_{t}"]
|
||||
informative[f"%-{pair}close-bb_lower-period_{t}"] = (
|
||||
informative["close"] / informative[f"{pair}bb_lowerband-period_{t}"]
|
||||
)
|
||||
|
||||
informative[f"%-{coin}roc-period_{t}"] = ta.ROC(informative, timeperiod=t)
|
||||
informative[f"%-{pair}roc-period_{t}"] = ta.ROC(informative, timeperiod=t)
|
||||
|
||||
informative[f"%-{coin}relative_volume-period_{t}"] = (
|
||||
informative[f"%-{pair}relative_volume-period_{t}"] = (
|
||||
informative["volume"] / informative["volume"].rolling(t).mean()
|
||||
)
|
||||
|
||||
informative[f"%-{coin}pct-change"] = informative["close"].pct_change()
|
||||
informative[f"%-{coin}raw_volume"] = informative["volume"]
|
||||
informative[f"%-{coin}raw_price"] = informative["close"]
|
||||
informative[f"%-{pair}pct-change"] = informative["close"].pct_change()
|
||||
informative[f"%-{pair}raw_volume"] = informative["volume"]
|
||||
informative[f"%-{pair}raw_price"] = informative["close"]
|
||||
|
||||
indicators = [col for col in informative if col.startswith("%")]
|
||||
# This loop duplicates and shifts all indicators to add a sense of recency to data
|
||||
|
|
|
@ -150,14 +150,20 @@ class Worker:
|
|||
if timeframe:
|
||||
next_tf = timeframe_to_next_date(timeframe)
|
||||
# Maximum throttling should be until new candle arrives
|
||||
# Offset of 0.2s is added to ensure a new candle has been issued.
|
||||
next_tf_with_offset = next_tf.timestamp() - time.time() + timeframe_offset
|
||||
# Offset is added to ensure a new candle has been issued.
|
||||
next_tft = next_tf.timestamp() - time.time()
|
||||
next_tf_with_offset = next_tft + timeframe_offset
|
||||
if next_tft < sleep_duration and sleep_duration < next_tf_with_offset:
|
||||
# Avoid hitting a new loop between the new candle and the candle with offset
|
||||
sleep_duration = next_tf_with_offset
|
||||
sleep_duration = min(sleep_duration, next_tf_with_offset)
|
||||
sleep_duration = max(sleep_duration, 0.0)
|
||||
# next_iter = datetime.now(timezone.utc) + timedelta(seconds=sleep_duration)
|
||||
|
||||
logger.debug(f"Throttling with '{func.__name__}()': sleep for {sleep_duration:.2f} s, "
|
||||
f"last iteration took {time_passed:.2f} s.")
|
||||
f"last iteration took {time_passed:.2f} s."
|
||||
# f"next: {next_iter}"
|
||||
)
|
||||
self._sleep(sleep_duration)
|
||||
return result
|
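A worked example may help with the candle-offset throttling above: the sleep is never allowed to run past the next candle plus the offset, and a wake-up that would land between the candle boundary and the offset is pushed to the end of that window. A standalone sketch with example numbers (the 0.2s offset is illustrative, and this mirrors only the lines shown above, not the whole Worker class):

def throttle_sleep(requested: float, secs_to_next_candle: float, offset: float = 0.2) -> float:
    next_tf_with_offset = secs_to_next_candle + offset
    sleep_duration = requested
    if secs_to_next_candle < sleep_duration < next_tf_with_offset:
        # Avoid waking up between the new candle and the candle-plus-offset window
        sleep_duration = next_tf_with_offset
    sleep_duration = min(sleep_duration, next_tf_with_offset)
    return max(sleep_duration, 0.0)

print(throttle_sleep(1.0, 4.5))   # 1.0 - wakes well before the next candle
print(throttle_sleep(4.6, 4.5))   # 4.7 - pushed past the candle + offset window
print(throttle_sleep(60.0, 4.5))  # 4.7 - capped so the new candle is not skipped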
||||
|
||||
|
|
|
@ -11,7 +11,7 @@ flake8==5.0.4
|
|||
flake8-tidy-imports==4.8.0
|
||||
mypy==0.982
|
||||
pre-commit==2.20.0
|
||||
pytest==7.1.3
|
||||
pytest==7.2.0
|
||||
pytest-asyncio==0.20.1
|
||||
pytest-cov==4.0.0
|
||||
pytest-mock==3.10.0
|
||||
|
@ -21,7 +21,7 @@ isort==5.10.1
|
|||
time-machine==2.8.2
|
||||
|
||||
# Convert jupyter notebooks to markdown documents
|
||||
nbconvert==7.2.1
|
||||
nbconvert==7.2.3
|
||||
|
||||
# mypy types
|
||||
types-cachetools==5.2.1
|
||||
|
|
|
@ -1,10 +1,11 @@
|
|||
# Include all requirements to run the bot.
|
||||
-r requirements.txt
|
||||
-r requirements-plot.txt
|
||||
|
||||
# Required for freqai
|
||||
scikit-learn==1.1.2
|
||||
scikit-learn==1.1.3
|
||||
joblib==1.2.0
|
||||
catboost==1.1; platform_machine != 'aarch64'
|
||||
catboost==1.1.1; platform_machine != 'aarch64'
|
||||
lightgbm==3.3.3
|
||||
xgboost==1.6.2
|
||||
xgboost==1.7.1
|
||||
tensorboard==2.10.1
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
|
||||
# Required for hyperopt
|
||||
scipy==1.9.3
|
||||
scikit-learn==1.1.2
|
||||
scikit-learn==1.1.3
|
||||
scikit-optimize==0.9.0
|
||||
filelock==3.8.0
|
||||
progressbar2==4.1.1
|
||||
progressbar2==4.2.0
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# Include all requirements to run the bot.
|
||||
-r requirements.txt
|
||||
|
||||
plotly==5.10.0
|
||||
plotly==5.11.0
|
||||
|
|
|
@ -1,29 +1,27 @@
|
|||
numpy==1.23.4
|
||||
pandas==1.5.1; platform_machine != 'armv7l'
|
||||
# Piwheels doesn't have 1.5.0 yet.
|
||||
pandas==1.4.3; platform_machine == 'armv7l'
|
||||
pandas==1.5.1
|
||||
pandas-ta==0.3.14b
|
||||
|
||||
ccxt==2.0.58
|
||||
ccxt==2.1.54
|
||||
# Pin cryptography for now due to rust build errors with piwheels
|
||||
cryptography==38.0.1
|
||||
aiohttp==3.8.3
|
||||
SQLAlchemy==1.4.42
|
||||
SQLAlchemy==1.4.43
|
||||
python-telegram-bot==13.14
|
||||
arrow==1.2.3
|
||||
cachetools==4.2.2
|
||||
requests==2.28.1
|
||||
urllib3==1.26.12
|
||||
jsonschema==4.16.0
|
||||
jsonschema==4.17.0
|
||||
TA-Lib==0.4.25
|
||||
technical==1.3.0
|
||||
tabulate==0.9.0
|
||||
pycoingecko==3.0.0
|
||||
pycoingecko==3.1.0
|
||||
jinja2==3.1.2
|
||||
tables==3.7.0
|
||||
blosc==1.10.6
|
||||
joblib==1.2.0
|
||||
pyarrow==9.0.0; platform_machine != 'armv7l'
|
||||
pyarrow==10.0.0; platform_machine != 'armv7l'
|
||||
|
||||
# find first, C search in arrays
|
||||
py_find_1st==1.1.5
|
||||
|
@ -31,7 +29,7 @@ py_find_1st==1.1.5
|
|||
# Load ticker files 30% faster
|
||||
python-rapidjson==1.9
|
||||
# Properly format api responses
|
||||
orjson==3.8.0
|
||||
orjson==3.8.1
|
||||
|
||||
# Notify systemd
|
||||
sdnotify==0.3.2
|
||||
|
@ -39,16 +37,16 @@ sdnotify==0.3.2
|
|||
# API Server
|
||||
fastapi==0.85.1
|
||||
pydantic==1.10.2
|
||||
uvicorn==0.18.3
|
||||
uvicorn==0.19.0
|
||||
pyjwt==2.6.0
|
||||
aiofiles==22.1.0
|
||||
psutil==5.9.2
|
||||
psutil==5.9.3
|
||||
|
||||
# Support for colorized terminal output
|
||||
colorama==0.4.5
|
||||
colorama==0.4.6
|
||||
# Building config files interactively
|
||||
questionary==1.10.0
|
||||
prompt-toolkit==3.0.31
|
||||
prompt-toolkit==3.0.32
|
||||
# Extensions to datetime library
|
||||
python-dateutil==2.8.2
|
||||
|
||||
|
@ -56,5 +54,5 @@ python-dateutil==2.8.2
|
|||
schedule==1.1.0
|
||||
|
||||
#WS Messages
|
||||
websockets==10.3
|
||||
websockets==10.4
|
||||
janus==1.0.0
|
||||
|
|
|
@ -18,7 +18,6 @@ import orjson
|
|||
import pandas
|
||||
import rapidjson
|
||||
import websockets
|
||||
from dateutil.relativedelta import relativedelta
|
||||
|
||||
|
||||
logger = logging.getLogger("WebSocketClient")
|
||||
|
@ -28,7 +27,7 @@ logger = logging.getLogger("WebSocketClient")
|
|||
|
||||
def setup_logging(filename: str):
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
level=logging.DEBUG,
|
||||
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
||||
handlers=[
|
||||
logging.FileHandler(filename),
|
||||
|
@ -75,16 +74,15 @@ def load_config(configfile):
|
|||
|
||||
def readable_timedelta(delta):
|
||||
"""
|
||||
Convert a dateutil.relativedelta to a readable format
|
||||
Convert a millisecond delta to a readable format
|
||||
|
||||
:param delta: A dateutil.relativedelta
|
||||
:param delta: A delta between two timestamps in milliseconds
|
||||
:returns: The readable time difference string
|
||||
"""
|
||||
attrs = ['years', 'months', 'days', 'hours', 'minutes', 'seconds', 'microseconds']
|
||||
return ", ".join([
|
||||
'%d %s' % (getattr(delta, attr), attr if getattr(delta, attr) > 0 else attr[:-1])
|
||||
for attr in attrs if getattr(delta, attr)
|
||||
])
|
||||
seconds, milliseconds = divmod(delta, 1000)
|
||||
minutes, seconds = divmod(seconds, 60)
|
||||
|
||||
return f"{int(minutes)}:{int(seconds)}.{int(milliseconds)}"
|
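A quick worked example of the new millisecond-based formatting (the same arithmetic as above, repeated as a standalone sketch):

def readable_timedelta(delta):
    # delta is a duration in milliseconds
    seconds, milliseconds = divmod(delta, 1000)
    minutes, seconds = divmod(seconds, 60)
    return f"{int(minutes)}:{int(seconds)}.{int(milliseconds)}"

print(readable_timedelta(75250))  # 1:15.250 -> 1 minute, 15 seconds, 250 ms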
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
|
@ -170,8 +168,8 @@ class ClientProtocol:
|
|||
|
||||
def _calculate_time_difference(self):
|
||||
old_last_received_at = self._LAST_RECEIVED_AT
|
||||
self._LAST_RECEIVED_AT = time.time() * 1e6
|
||||
time_delta = relativedelta(microseconds=(self._LAST_RECEIVED_AT - old_last_received_at))
|
||||
self._LAST_RECEIVED_AT = time.time() * 1e3
|
||||
time_delta = self._LAST_RECEIVED_AT - old_last_received_at
|
||||
|
||||
return readable_timedelta(time_delta)
|
||||
|
||||
|
@ -242,12 +240,10 @@ async def create_client(
|
|||
):
|
||||
# Try pinging
|
||||
try:
|
||||
pong = ws.ping()
|
||||
await asyncio.wait_for(
|
||||
pong,
|
||||
timeout=ping_timeout
|
||||
)
|
||||
logger.info("Connection still alive...")
|
||||
pong = await ws.ping()
|
||||
latency = (await asyncio.wait_for(pong, timeout=ping_timeout) * 1000)
|
||||
|
||||
logger.info(f"Connection still alive, latency: {latency}ms")
|
||||
|
||||
continue
|
||||
|
||||
|
@ -272,6 +268,7 @@ async def create_client(
|
|||
websockets.exceptions.ConnectionClosedError,
|
||||
websockets.exceptions.ConnectionClosedOK
|
||||
):
|
||||
logger.info("Connection was closed")
|
||||
# Just keep trying to connect again indefinitely
|
||||
await asyncio.sleep(sleep_time)
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
# pragma pylint: disable=missing-docstring, protected-access, C0103
|
||||
|
||||
import re
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
|
@ -154,6 +155,23 @@ def test_jsondatahandler_ohlcv_load(testdatadir, caplog):
|
|||
assert df.columns.equals(df1.columns)
|
||||
|
||||
|
||||
def test_datahandler_ohlcv_data_min_max(testdatadir):
|
||||
dh = JsonDataHandler(testdatadir)
|
||||
min_max = dh.ohlcv_data_min_max('UNITTEST/BTC', '5m', 'spot')
|
||||
assert len(min_max) == 2
|
||||
|
||||
# Empty pair
|
||||
min_max = dh.ohlcv_data_min_max('UNITTEST/BTC', '8m', 'spot')
|
||||
assert len(min_max) == 2
|
||||
assert min_max[0] == datetime.fromtimestamp(0, tz=timezone.utc)
|
||||
assert min_max[0] == min_max[1]
|
||||
# Empty pair2
|
||||
min_max = dh.ohlcv_data_min_max('NOPAIR/XXX', '4m', 'spot')
|
||||
assert len(min_max) == 2
|
||||
assert min_max[0] == datetime.fromtimestamp(0, tz=timezone.utc)
|
||||
assert min_max[0] == min_max[1]
|
||||
|
||||
|
||||
def test_datahandler__check_empty_df(testdatadir, caplog):
|
||||
dh = JsonDataHandler(testdatadir)
|
||||
expected_text = r"Price jump in UNITTEST/USDT, 1h, spot between"
|
||||
|
|
|
@ -3,8 +3,11 @@ from datetime import datetime, timezone
|
|||
from pathlib import Path
|
||||
from unittest.mock import PropertyMock
|
||||
|
||||
import pytest
|
||||
|
||||
from freqtrade.commands.optimize_commands import setup_optimize_configuration
|
||||
from freqtrade.enums import RunMode
|
||||
from freqtrade.exceptions import OperationalException
|
||||
from freqtrade.optimize.backtesting import Backtesting
|
||||
from tests.conftest import (CURRENT_TEST_STRATEGY, get_args, log_has_re, patch_exchange,
|
||||
patched_configuration_load_config_file)
|
||||
|
@ -51,3 +54,32 @@ def test_freqai_backtest_load_data(freqai_conf, mocker, caplog):
|
|||
assert log_has_re('Increasing startup_candle_count for freqai to.*', caplog)
|
||||
|
||||
Backtesting.cleanup()
|
||||
|
||||
|
||||
def test_freqai_backtest_live_models_model_not_found(freqai_conf, mocker, testdatadir, caplog):
|
||||
patch_exchange(mocker)
|
||||
|
||||
now = datetime.now(timezone.utc)
|
||||
mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
|
||||
PropertyMock(return_value=['HULUMULU/USDT', 'XRP/USDT']))
|
||||
mocker.patch('freqtrade.optimize.backtesting.history.load_data')
|
||||
mocker.patch('freqtrade.optimize.backtesting.history.get_timerange', return_value=(now, now))
|
||||
freqai_conf["timerange"] = ""
|
||||
patched_configuration_load_config_file(mocker, freqai_conf)
|
||||
|
||||
args = [
|
||||
'backtesting',
|
||||
'--config', 'config.json',
|
||||
'--datadir', str(testdatadir),
|
||||
'--strategy-path', str(Path(__file__).parents[1] / 'strategy/strats'),
|
||||
'--timeframe', '5m',
|
||||
'--freqai-backtest-live-models'
|
||||
]
|
||||
args = get_args(args)
|
||||
bt_config = setup_optimize_configuration(args, RunMode.BACKTEST)
|
||||
|
||||
with pytest.raises(OperationalException,
|
||||
match=r".* Saved models are required to run backtest .*"):
|
||||
Backtesting(bt_config)
|
||||
|
||||
Backtesting.cleanup()
|
||||
|
|
|
@ -22,6 +22,7 @@ def test_update_historic_data(mocker, freqai_conf):
|
|||
historic_candles = len(freqai.dd.historic_data["ADA/BTC"]["5m"])
|
||||
dp_candles = len(strategy.dp.get_pair_dataframe("ADA/BTC", "5m"))
|
||||
candle_difference = dp_candles - historic_candles
|
||||
freqai.dk.pair = "ADA/BTC"
|
||||
freqai.dd.update_historic_data(strategy, freqai.dk)
|
||||
|
||||
updated_historic_candles = len(freqai.dd.historic_data["ADA/BTC"]["5m"])
|
||||
|
|
|
@ -1,13 +1,18 @@
|
|||
import shutil
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
from freqtrade.configuration import TimeRange
|
||||
from freqtrade.data.dataprovider import DataProvider
|
||||
from freqtrade.exceptions import OperationalException
|
||||
from tests.conftest import log_has_re
|
||||
from tests.freqai.conftest import (get_patched_data_kitchen, make_data_dictionary,
|
||||
make_unfiltered_dataframe)
|
||||
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||
from freqtrade.freqai.utils import get_timerange_backtest_live_models
|
||||
from tests.conftest import get_patched_exchange, log_has_re
|
||||
from tests.freqai.conftest import (get_patched_data_kitchen, get_patched_freqai_strategy,
|
||||
make_data_dictionary, make_unfiltered_dataframe)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
|
@ -159,3 +164,98 @@ def test_make_train_test_datasets(mocker, freqai_conf):
|
|||
assert data_dictionary
|
||||
assert len(data_dictionary) == 7
|
||||
assert len(data_dictionary['train_features'].index) == 1916
|
||||
|
||||
|
||||
def test_get_pairs_timestamp_validation(mocker, freqai_conf):
|
||||
exchange = get_patched_exchange(mocker, freqai_conf)
|
||||
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
||||
strategy.dp = DataProvider(freqai_conf, exchange)
|
||||
strategy.freqai_info = freqai_conf.get("freqai", {})
|
||||
freqai = strategy.freqai
|
||||
freqai.live = True
|
||||
freqai.dk = FreqaiDataKitchen(freqai_conf)
|
||||
freqai_conf['freqai'].update({"identifier": "invalid_id"})
|
||||
model_path = freqai.dk.get_full_models_path(freqai_conf)
|
||||
with pytest.raises(
|
||||
OperationalException,
|
||||
match=r'.*required to run backtest with the freqai-backtest-live-models.*'
|
||||
):
|
||||
freqai.dk.get_assets_timestamps_training_from_ready_models(model_path)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('model', [
|
||||
'LightGBMRegressor'
|
||||
])
|
||||
def test_get_timerange_from_ready_models(mocker, freqai_conf, model):
|
||||
freqai_conf.update({"freqaimodel": model})
|
||||
freqai_conf.update({"timerange": "20180110-20180130"})
|
||||
freqai_conf.update({"strategy": "freqai_test_strat"})
|
||||
|
||||
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
||||
exchange = get_patched_exchange(mocker, freqai_conf)
|
||||
strategy.dp = DataProvider(freqai_conf, exchange)
|
||||
strategy.freqai_info = freqai_conf.get("freqai", {})
|
||||
freqai = strategy.freqai
|
||||
freqai.live = True
|
||||
freqai.dk = FreqaiDataKitchen(freqai_conf)
|
||||
timerange = TimeRange.parse_timerange("20180101-20180130")
|
||||
freqai.dd.load_all_pair_histories(timerange, freqai.dk)
|
||||
|
||||
freqai.dd.pair_dict = MagicMock()
|
||||
|
||||
data_load_timerange = TimeRange.parse_timerange("20180101-20180130")
|
||||
|
||||
# 1516233600 (2018-01-18 00:00) - Start Training 1
|
||||
# 1516406400 (2018-01-20 00:00) - End Training 1 (Backtest slice 1)
|
||||
# 1516579200 (2018-01-22 00:00) - End Training 2 (Backtest slice 2)
|
||||
# 1516838400 (2018-01-25 00:00) - End Timerange
|
||||
|
||||
new_timerange = TimeRange("date", "date", 1516233600, 1516406400)
|
||||
freqai.extract_data_and_train_model(
|
||||
new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)
|
||||
|
||||
new_timerange = TimeRange("date", "date", 1516406400, 1516579200)
|
||||
freqai.extract_data_and_train_model(
|
||||
new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)
|
||||
|
||||
model_path = freqai.dk.get_full_models_path(freqai_conf)
|
||||
(backtesting_timerange,
|
||||
pairs_end_dates) = freqai.dk.get_timerange_and_assets_end_dates_from_ready_models(
|
||||
models_path=model_path)
|
||||
|
||||
assert len(pairs_end_dates["ADA"]) == 2
|
||||
assert backtesting_timerange.startts == 1516406400
|
||||
assert backtesting_timerange.stopts == 1516838400
|
||||
|
||||
backtesting_string_timerange = get_timerange_backtest_live_models(freqai_conf)
|
||||
assert backtesting_string_timerange == '20180120-20180125'
|
||||
|
||||
|
||||
@pytest.mark.parametrize('model', [
|
||||
'LightGBMRegressor'
|
||||
])
|
||||
def test_get_full_model_path(mocker, freqai_conf, model):
|
||||
freqai_conf.update({"freqaimodel": model})
|
||||
freqai_conf.update({"timerange": "20180110-20180130"})
|
||||
freqai_conf.update({"strategy": "freqai_test_strat"})
|
||||
|
||||
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
||||
exchange = get_patched_exchange(mocker, freqai_conf)
|
||||
strategy.dp = DataProvider(freqai_conf, exchange)
|
||||
strategy.freqai_info = freqai_conf.get("freqai", {})
|
||||
freqai = strategy.freqai
|
||||
freqai.live = True
|
||||
freqai.dk = FreqaiDataKitchen(freqai_conf)
|
||||
timerange = TimeRange.parse_timerange("20180110-20180130")
|
||||
freqai.dd.load_all_pair_histories(timerange, freqai.dk)
|
||||
|
||||
freqai.dd.pair_dict = MagicMock()
|
||||
|
||||
data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
|
||||
new_timerange = TimeRange.parse_timerange("20180120-20180130")
|
||||
|
||||
freqai.extract_data_and_train_model(
|
||||
new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)
|
||||
|
||||
model_path = freqai.dk.get_full_models_path(freqai_conf)
|
||||
assert model_path.is_dir() is True
|
||||
|
|
|
@ -27,16 +27,16 @@ def is_mac() -> bool:
|
|||
return "Darwin" in machine
|
||||
|
||||
|
||||
@pytest.mark.parametrize('model', [
|
||||
'LightGBMRegressor',
|
||||
'XGBoostRegressor',
|
||||
'XGBoostRFRegressor',
|
||||
'CatboostRegressor',
|
||||
'ReinforcementLearner',
|
||||
'ReinforcementLearner_multiproc',
|
||||
'ReinforcementLearner_test_4ac'
|
||||
@pytest.mark.parametrize('model, pca, dbscan', [
|
||||
('LightGBMRegressor', True, False),
|
||||
('XGBoostRegressor', False, True),
|
||||
('XGBoostRFRegressor', False, False),
|
||||
('CatboostRegressor', False, False),
|
||||
('ReinforcementLearner', False, False),
|
||||
('ReinforcementLearner_multiproc', False, False),
|
||||
('ReinforcementLearner_test_4ac', False, False)
|
||||
])
|
||||
def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model):
|
||||
def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, dbscan):
|
||||
if is_arm() and model == 'CatboostRegressor':
|
||||
pytest.skip("CatBoost is not supported on ARM")
|
||||
|
||||
|
@ -47,6 +47,8 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model):
|
|||
freqai_conf.update({"freqaimodel": model})
|
||||
freqai_conf.update({"timerange": "20180110-20180130"})
|
||||
freqai_conf.update({"strategy": "freqai_test_strat"})
|
||||
freqai_conf['freqai']['feature_parameters'].update({"principal_component_analysis": pca})
|
||||
freqai_conf['freqai']['feature_parameters'].update({"use_DBSCAN_to_remove_outliers": dbscan})
|
||||
|
||||
if 'ReinforcementLearner' in model:
|
||||
model_save_ext = 'zip'
|
||||
|
@ -89,17 +91,19 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model):
|
|||
shutil.rmtree(Path(freqai.dk.full_path))
|
||||
|
||||
|
||||
@pytest.mark.parametrize('model', [
|
||||
'LightGBMRegressorMultiTarget',
|
||||
'XGBoostRegressorMultiTarget',
|
||||
'CatboostRegressorMultiTarget',
|
||||
@pytest.mark.parametrize('model, strat', [
|
||||
('LightGBMRegressorMultiTarget', "freqai_test_multimodel_strat"),
|
||||
('XGBoostRegressorMultiTarget', "freqai_test_multimodel_strat"),
|
||||
('CatboostRegressorMultiTarget', "freqai_test_multimodel_strat"),
|
||||
('LightGBMClassifierMultiTarget', "freqai_test_multimodel_classifier_strat"),
|
||||
('CatboostClassifierMultiTarget', "freqai_test_multimodel_classifier_strat")
|
||||
])
|
||||
def test_extract_data_and_train_model_MultiTargets(mocker, freqai_conf, model):
|
||||
if is_arm() and model == 'CatboostRegressorMultiTarget':
|
||||
def test_extract_data_and_train_model_MultiTargets(mocker, freqai_conf, model, strat):
|
||||
if is_arm() and 'Catboost' in model:
|
||||
pytest.skip("CatBoost is not supported on ARM")
|
||||
|
||||
freqai_conf.update({"timerange": "20180110-20180130"})
|
||||
freqai_conf.update({"strategy": "freqai_test_multimodel_strat"})
|
||||
freqai_conf.update({"strategy": strat})
|
||||
freqai_conf.update({"freqaimodel": model})
|
||||
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
|
||||
exchange = get_patched_exchange(mocker, freqai_conf)
|
||||
|
@ -216,6 +220,7 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog)
|
|||
corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC", freqai.dk)
|
||||
|
||||
df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC")
|
||||
df = freqai.cache_corr_pairlist_dfs(df, freqai.dk)
|
||||
for i in range(5):
|
||||
df[f'%-constant_{i}'] = i
|
||||
# df.loc[:, f'%-constant_{i}'] = i
|
||||
|
@ -362,6 +367,7 @@ def test_follow_mode(mocker, freqai_conf):
|
|||
|
||||
df = strategy.dp.get_pair_dataframe('ADA/BTC', '5m')
|
||||
|
||||
freqai.dk.pair = "ADA/BTC"
|
||||
freqai.start_live(df, metadata, strategy, freqai.dk)
|
||||
|
||||
assert len(freqai.dk.return_dataframe.index) == 5702
|
||||
|
|
|
@ -764,6 +764,7 @@ def test_backtest_one(default_conf, fee, mocker, testdatadir) -> None:
|
|||
'max_rate': [0.10501, 0.1038888],
|
||||
'is_open': [False, False],
|
||||
'enter_tag': [None, None],
|
||||
"leverage": [1.0, 1.0],
|
||||
"is_short": [False, False],
|
||||
'open_timestamp': [1517251200000, 1517283000000],
|
||||
'close_timestamp': [1517265300000, 1517285400000],
|
||||
|
@ -788,13 +789,14 @@ def test_backtest_one(default_conf, fee, mocker, testdatadir) -> None:
|
|||
assert len(t['orders']) == 2
|
||||
ln = data_pair.loc[data_pair["date"] == t["open_date"]]
|
||||
# Check open trade rate aligns with open rate
|
||||
assert ln is not None
|
||||
assert not ln.empty
|
||||
assert round(ln.iloc[0]["open"], 6) == round(t["open_rate"], 6)
|
||||
# check close trade rate aligns with close rate or is between high and low
|
||||
ln = data_pair.loc[data_pair["date"] == t["close_date"]]
|
||||
assert (round(ln.iloc[0]["open"], 6) == round(t["close_rate"], 6) or
|
||||
round(ln.iloc[0]["low"], 6) < round(
|
||||
t["close_rate"], 6) < round(ln.iloc[0]["high"], 6))
|
||||
ln1 = data_pair.loc[data_pair["date"] == t["close_date"]]
|
||||
assert not ln1.empty
|
||||
assert (round(ln1.iloc[0]["open"], 6) == round(t["close_rate"], 6) or
|
||||
round(ln1.iloc[0]["low"], 6) < round(
|
||||
t["close_rate"], 6) < round(ln1.iloc[0]["high"], 6))
|
||||
|
||||
|
||||
def test_backtest_timedout_entry_orders(default_conf, fee, mocker, testdatadir) -> None:
|
||||
|
|
|
@ -72,6 +72,7 @@ def test_backtest_position_adjustment(default_conf, fee, mocker, testdatadir) ->
|
|||
'max_rate': [0.10481985, 0.1038888],
|
||||
'is_open': [False, False],
|
||||
'enter_tag': [None, None],
|
||||
'leverage': [1.0, 1.0],
|
||||
'is_short': [False, False],
|
||||
'open_timestamp': [1517251200000, 1517283000000],
|
||||
'close_timestamp': [1517265300000, 1517285400000],
|
||||
|
|
|
@ -2,6 +2,8 @@
|
|||
|
||||
import logging
|
||||
import time
|
||||
from copy import deepcopy
|
||||
from datetime import timedelta
|
||||
from unittest.mock import MagicMock, PropertyMock
|
||||
|
||||
import pandas as pd
|
||||
|
@ -719,15 +721,26 @@ def test_PerformanceFilter_error(mocker, whitelist_conf, caplog) -> None:
|
|||
def test_ShuffleFilter_init(mocker, whitelist_conf, caplog) -> None:
|
||||
whitelist_conf['pairlists'] = [
|
||||
{"method": "StaticPairList"},
|
||||
{"method": "ShuffleFilter", "seed": 42}
|
||||
{"method": "ShuffleFilter", "seed": 43}
|
||||
]
|
||||
|
||||
exchange = get_patched_exchange(mocker, whitelist_conf)
|
||||
PairListManager(exchange, whitelist_conf)
|
||||
assert log_has("Backtesting mode detected, applying seed value: 42", caplog)
|
||||
plm = PairListManager(exchange, whitelist_conf)
|
||||
assert log_has("Backtesting mode detected, applying seed value: 43", caplog)
|
||||
|
||||
with time_machine.travel("2021-09-01 05:01:00 +00:00") as t:
|
||||
plm.refresh_pairlist()
|
||||
pl1 = deepcopy(plm.whitelist)
|
||||
plm.refresh_pairlist()
|
||||
assert plm.whitelist == pl1
|
||||
|
||||
t.shift(timedelta(minutes=10))
|
||||
plm.refresh_pairlist()
|
||||
assert plm.whitelist != pl1
|
||||
|
||||
caplog.clear()
|
||||
whitelist_conf['runmode'] = RunMode.DRY_RUN
|
||||
PairListManager(exchange, whitelist_conf)
|
||||
plm = PairListManager(exchange, whitelist_conf)
|
||||
assert not log_has("Backtesting mode detected, applying seed value: 42", caplog)
|
||||
assert log_has("Live mode detected, not applying seed.", caplog)
|
||||
|
||||
|
|
|
@ -1461,6 +1461,7 @@ def test_api_strategies(botclient, tmpdir):
|
|||
'StrategyTestV3Futures',
|
||||
'freqai_rl_test_strat',
|
||||
'freqai_test_classifier',
|
||||
'freqai_test_multimodel_classifier_strat',
|
||||
'freqai_test_multimodel_strat',
|
||||
'freqai_test_strat'
|
||||
]}
|
||||
|
|
138
tests/strategy/strats/freqai_test_multimodel_classifier_strat.py
Normal file
138
tests/strategy/strats/freqai_test_multimodel_classifier_strat.py
Normal file
|
@ -0,0 +1,138 @@
import logging
from functools import reduce

import numpy as np
import pandas as pd
import talib.abstract as ta
from pandas import DataFrame

from freqtrade.strategy import DecimalParameter, IntParameter, IStrategy, merge_informative_pair


logger = logging.getLogger(__name__)


class freqai_test_multimodel_classifier_strat(IStrategy):
    """
    Test strategy - used for testing freqAI multimodel functionalities.
    Do not use in production.
    """

    minimal_roi = {"0": 0.1, "240": -1}

    plot_config = {
        "main_plot": {},
        "subplots": {
            "prediction": {"prediction": {"color": "blue"}},
            "target_roi": {
                "target_roi": {"color": "brown"},
            },
            "do_predict": {
                "do_predict": {"color": "brown"},
            },
        },
    }

    process_only_new_candles = True
    stoploss = -0.05
    use_exit_signal = True
    startup_candle_count: int = 300
    can_short = False

    linear_roi_offset = DecimalParameter(
        0.00, 0.02, default=0.005, space="sell", optimize=False, load=True
    )
    max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True)

    def populate_any_indicators(
        self, pair, df, tf, informative=None, set_generalized_indicators=False
    ):

        coin = pair.split('/')[0]

        if informative is None:
            informative = self.dp.get_pair_dataframe(pair, tf)

        # first loop is automatically duplicating indicators for time periods
        for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]:

            t = int(t)
            informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
            informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
            informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t)

        informative[f"%-{coin}pct-change"] = informative["close"].pct_change()
        informative[f"%-{coin}raw_volume"] = informative["volume"]
        informative[f"%-{coin}raw_price"] = informative["close"]

        indicators = [col for col in informative if col.startswith("%")]
        # This loop duplicates and shifts all indicators to add a sense of recency to data
        for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1):
            if n == 0:
                continue
            informative_shift = informative[indicators].shift(n)
            informative_shift = informative_shift.add_suffix("_shift-" + str(n))
            informative = pd.concat((informative, informative_shift), axis=1)

        df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True)
        skip_columns = [
            (s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"]
        ]
        df = df.drop(columns=skip_columns)

        # Add generalized indicators here (because in live, it will call this
        # function to populate indicators during training). Notice how we ensure not to
        # add them multiple times
        if set_generalized_indicators:
            df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7
            df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25

            # user adds targets here by prepending them with &- (see convention below)
            # If user wishes to use multiple targets, a multioutput prediction model
            # needs to be used such as templates/CatboostPredictionMultiModel.py
            df['&s-up_or_down'] = np.where(df["close"].shift(-50) >
                                           df["close"], 'up', 'down')

            df['&s-up_or_down2'] = np.where(df["close"].shift(-50) >
                                            df["close"], 'up2', 'down2')

        return df

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:

        self.freqai_info = self.config["freqai"]

        dataframe = self.freqai.start(dataframe, metadata, self)

        dataframe["target_roi"] = dataframe["&-s_close_mean"] + dataframe["&-s_close_std"] * 1.25
        dataframe["sell_roi"] = dataframe["&-s_close_mean"] - dataframe["&-s_close_std"] * 1.25
        return dataframe

    def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:

        enter_long_conditions = [df["do_predict"] == 1, df["&-s_close"] > df["target_roi"]]

        if enter_long_conditions:
            df.loc[
                reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"]
            ] = (1, "long")

        enter_short_conditions = [df["do_predict"] == 1, df["&-s_close"] < df["sell_roi"]]

        if enter_short_conditions:
            df.loc[
                reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"]
            ] = (1, "short")

        return df

    def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
        exit_long_conditions = [df["do_predict"] == 1, df["&-s_close"] < df["sell_roi"] * 0.25]
        if exit_long_conditions:
            df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1

        exit_short_conditions = [df["do_predict"] == 1, df["&-s_close"] > df["target_roi"] * 0.25]
        if exit_short_conditions:
            df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1

        return df
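The shifted-candles loop above is the central feature-engineering step of this test strategy: every %-prefixed column is duplicated and lagged `include_shifted_candles` times so the model also sees recent history. A standalone pandas sketch of the same pattern (the helper name and the example shift count are illustrative only):

```python
# Standalone sketch of the shift-and-suffix pattern used in populate_any_indicators.
import pandas as pd


def add_shifted_features(informative: pd.DataFrame, shifts: int) -> pd.DataFrame:
    """Duplicate every %-prefixed feature column `shifts` times, each lagged by n candles."""
    feature_cols = [col for col in informative if col.startswith("%")]
    frames = [informative]
    for n in range(1, shifts + 1):
        frames.append(informative[feature_cols].shift(n).add_suffix(f"_shift-{n}"))
    return pd.concat(frames, axis=1)


# Example: three lagged copies of each feature column.
# enriched = add_shifted_features(informative, shifts=3)
```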
@@ -1538,3 +1538,85 @@ def test_flat_vars_to_nested_dict(caplog):

    assert log_has("Loading variable 'FREQTRADE__EXCHANGE__SOME_SETTING'", caplog)
    assert not log_has("Loading variable 'NOT_RELEVANT'", caplog)


def test_setup_hyperopt_freqai(mocker, default_conf, caplog) -> None:
    patched_configuration_load_config_file(mocker, default_conf)
    mocker.patch(
        'freqtrade.configuration.configuration.create_datadir',
        lambda c, x: x
    )
    mocker.patch(
        'freqtrade.configuration.configuration.create_userdata_dir',
        lambda x, *args, **kwargs: Path(x)
    )
    arglist = [
        'hyperopt',
        '--config', 'config.json',
        '--strategy', CURRENT_TEST_STRATEGY,
        '--timerange', '20220801-20220805',
        "--freqaimodel",
        "LightGBMRegressorMultiTarget",
        "--analyze-per-epoch"
    ]

    args = Arguments(arglist).get_parsed_arg()

    configuration = Configuration(args)
    config = configuration.get_config()
    config['freqai'] = {
        "enabled": True
    }
    with pytest.raises(
        OperationalException, match=r".*analyze-per-epoch parameter is not supported.*"
    ):
        validate_config_consistency(config)


def test_setup_freqai_backtesting(mocker, default_conf, caplog) -> None:
    patched_configuration_load_config_file(mocker, default_conf)
    mocker.patch(
        'freqtrade.configuration.configuration.create_datadir',
        lambda c, x: x
    )
    mocker.patch(
        'freqtrade.configuration.configuration.create_userdata_dir',
        lambda x, *args, **kwargs: Path(x)
    )
    arglist = [
        'backtesting',
        '--config', 'config.json',
        '--strategy', CURRENT_TEST_STRATEGY,
        '--timerange', '20220801-20220805',
        "--freqaimodel",
        "LightGBMRegressorMultiTarget",
        "--freqai-backtest-live-models"
    ]

    args = Arguments(arglist).get_parsed_arg()

    configuration = Configuration(args)
    config = configuration.get_config()
    config['runmode'] = RunMode.BACKTEST

    with pytest.raises(
        OperationalException, match=r".*--freqai-backtest-live-models parameter is only.*"
    ):
        validate_config_consistency(config)

    conf = deepcopy(config)
    conf['freqai'] = {
        "enabled": True
    }
    with pytest.raises(
        OperationalException, match=r".* timerange parameter is not supported with .*"
    ):
        validate_config_consistency(conf)

    conf['timerange'] = None
    conf['freqai_backtest_live_models'] = False

    with pytest.raises(
        OperationalException, match=r".* pass --timerange if you intend to use FreqAI .*"
    ):
        validate_config_consistency(conf)
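The two tests above pin down how FreqAI options are cross-checked at configuration time: `--analyze-per-epoch` is rejected for FreqAI runs, `--freqai-backtest-live-models` excludes an explicit timerange, and a timerange is otherwise mandatory for FreqAI backtesting. A partial sketch of checks in that spirit (the helper, its exception class and the messages are illustrative, not freqtrade's validate_config_consistency):

```python
# Illustrative, partial sketch of FreqAI-related config cross-checks.
from typing import Any, Dict


class OperationalException(Exception):
    """Stand-in for freqtrade's own exception type."""


def validate_freqai_options(config: Dict[str, Any]) -> None:
    if not config.get("freqai", {}).get("enabled", False):
        return
    if config.get("analyze_per_epoch"):
        raise OperationalException(
            "Using analyze-per-epoch parameter is not supported with a FreqAI strategy.")
    if config.get("freqai_backtest_live_models"):
        if config.get("timerange"):
            raise OperationalException(
                "Using timerange parameter is not supported with "
                "--freqai-backtest-live-models parameter.")
    elif not config.get("timerange"):
        raise OperationalException(
            "Please pass --timerange if you intend to use FreqAI for backtesting.")
```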
@@ -5305,7 +5305,7 @@ def test_get_valid_price(mocker, default_conf_usdt) -> None:
])
def test_update_funding_fees_schedule(mocker, default_conf, trading_mode, calls, time_machine,
                                      t1, t2):
    time_machine.move_to(f"{t1} +00:00")
    time_machine.move_to(f"{t1} +00:00", tick=False)

    patch_RPCManager(mocker)
    patch_exchange(mocker)

@@ -5314,7 +5314,7 @@ def test_update_funding_fees_schedule(mocker, default_conf, trading_mode, calls,
    default_conf['margin_mode'] = 'isolated'
    freqtrade = get_patched_freqtradebot(mocker, default_conf)

    time_machine.move_to(f"{t2} +00:00")
    time_machine.move_to(f"{t2} +00:00", tick=False)
    # Check schedule jobs in debugging with freqtrade._schedule.jobs
    freqtrade._schedule.run_pending()
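Both changed lines add `tick=False`, which freezes the faked clock instead of letting it keep advancing after `move_to`, keeping the schedule-based funding-fee assertions deterministic. A minimal sketch of the difference, assuming the time-machine library's pytest fixture (string destinations additionally assume dateutil is available):

```python
# Sketch of frozen vs. ticking time with the time-machine pytest fixture (assumed available).
import datetime


def test_frozen_clock(time_machine):
    time_machine.move_to("2021-09-01 08:00:00 +00:00", tick=False)
    first = datetime.datetime.now(datetime.timezone.utc)
    # ... run code whose behaviour depends on the exact wall-clock time ...
    second = datetime.datetime.now(datetime.timezone.utc)
    # With tick=False the clock does not advance between the two reads.
    assert first == second
```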
@@ -113,6 +113,16 @@ def test_throttle_sleep_time(mocker, default_conf, caplog) -> None:
    # 300 (5m) - 60 (1m - see set time above) - 5 (duration of throttled_func) = 235
    assert 235.2 < sleep_mock.call_args[0][0] < 235.6

    t.move_to("2022-09-01 05:04:51 +00:00")
    sleep_mock.reset_mock()
    # Offset of 5s, so we hit the sweet-spot between "candle" and "candle offset"
    # Which should not get a throttle iteration to avoid late candle fetching
    assert worker._throttle(throttled_func, throttle_secs=10, timeframe='5m',
                            timeframe_offset=5, x=1.2) == 42
    assert sleep_mock.call_count == 1
    # Time is slightly bigger than throttle secs due to the high timeframe offset.
    assert 11.1 < sleep_mock.call_args[0][0] < 13.2


def test_throttle_with_assets(mocker, default_conf) -> None:
    def throttled_func(nb_assets=-1):
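The new assertions encode the `timeframe_offset` behaviour: waking up between a candle open and candle open + offset would fetch a candle the exchange has not finalised yet, so the throttle stretches the sleep slightly past `throttle_secs` to land after the offset. A simplified sketch of that arithmetic (not the actual Worker._throttle implementation):

```python
# Simplified sketch of candle-aligned throttle timing (illustrative only).
def throttle_sleep_seconds(now: float, throttle_secs: float, timeframe_secs: float,
                           timeframe_offset: float, last_call_duration: float = 0.0) -> float:
    """Return how long to sleep so the next iteration does not start inside the offset window."""
    next_candle_open = (now // timeframe_secs + 1) * timeframe_secs
    wakeup = now + throttle_secs
    if next_candle_open <= wakeup < next_candle_open + timeframe_offset:
        # Waking here would poll for a candle that is not available yet - wait for the offset.
        wakeup = next_candle_open + timeframe_offset
    return max(wakeup - now - last_call_duration, 0.0)


# Example matching the test: 9s before a 5m candle, 10s throttle, 5s offset
# -> sleep roughly 14s minus the duration of the last iteration.
# throttle_sleep_seconds(now=291.0, throttle_secs=10, timeframe_secs=300, timeframe_offset=5)
```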