From d4cfcbda24bf90ebd771f016f9fcb1fe46e2ac22 Mon Sep 17 00:00:00 2001
From: robcaulk
Date: Fri, 4 Nov 2022 17:53:15 +0100
Subject: [PATCH 1/3] move write_metrics_to_disk to proper place in param table

---
 docs/freqai-parameter-table.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md
index 28a15913b..8a240c372 100644
--- a/docs/freqai-parameter-table.md
+++ b/docs/freqai-parameter-table.md
@@ -18,6 +18,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
 | `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)). <br> **Datatype:** Positive integer.
 | `follow_mode` | Use a `follower` that will look for models associated with a specific `identifier` and load those for inferencing. A `follower` will **not** train new models. <br> **Datatype:** Boolean. <br> Default: `False`.
 | `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)). <br> **Datatype:** Boolean. <br> Default: `False`.
+| `write_metrics_to_disk` | Collect train timings, inference timings and cpu usage in json file. <br> **Datatype:** Boolean. <br> Default: `False`
 | | **Feature parameters**
 | `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples are shown [here](freqai-feature-engineering.md). <br> **Datatype:** Dictionary.
 | `include_timeframes` | A list of timeframes that all indicators in `populate_any_indicators` will be created for. The list is added as features to the base indicators dataset. <br> **Datatype:** List of timeframes (strings).
@@ -37,7 +38,6 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
 | `noise_standard_deviation` | If set, FreqAI adds noise to the training features with the aim of preventing overfitting. FreqAI generates random deviates from a gaussian distribution with a standard deviation of `noise_standard_deviation` and adds them to all data points. `noise_standard_deviation` should be kept relative to the normalized space, i.e., between -1 and 1. In other words, since data in FreqAI is always normalized to be between -1 and 1, `noise_standard_deviation: 0.05` would result in 32% of the data being randomly increased/decreased by more than 2.5% (i.e., the percent of data falling within the first standard deviation). <br> **Datatype:** Integer. <br> Default: `0`.
 | `outlier_protection_percentage` | Enable to prevent outlier detection methods from discarding too much data. If more than `outlier_protection_percentage` % of points are detected as outliers by the SVM or DBSCAN, FreqAI will log a warning message and ignore outlier detection, i.e., the original dataset will be kept intact. If the outlier protection is triggered, no predictions will be made based on the training dataset. <br> **Datatype:** Float. <br> Default: `30`.
 | `reverse_train_test_order` | Split the feature dataset (see below) and use the latest data split for training and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, you should be careful to understand the unorthodox nature of this parameter before employing it. <br> **Datatype:** Boolean. <br> Default: `False` (no reversal).
-| `write_metrics_to_disk` | Collect train timings, inference timings and cpu usage in json file. <br> **Datatype:** Boolean. <br> Default: `False`
 | | **Data split parameters**
 | `data_split_parameters` | Include any additional parameters available from Scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website). <br> **Datatype:** Dictionary.
 | `test_size` | The fraction of data that should be used for testing instead of training. <br> **Datatype:** Positive float < 1.
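The row is moved because `write_metrics_to_disk` is a general FreqAI option rather than a feature parameter. A minimal sketch of the intended placement inside the `freqai` config section, written as a Python dict; every key other than `write_metrics_to_disk` is an illustrative assumption taken from the table above, not something prescribed by this patch:

```python
# Sketch of config placement only -- surrounding keys/values are examples.
config = {
    "freqai": {
        "enabled": True,
        "identifier": "example",
        "write_metrics_to_disk": True,   # general FreqAI parameter, Default: False
        "feature_parameters": {
            # feature-engineering options belong here, not write_metrics_to_disk
            "include_timeframes": ["5m", "15m"],
        },
        "data_split_parameters": {
            "test_size": 0.25,
        },
    },
}
```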
From 6e09d552ac19377acbabe17846f2d60ce3a59b2c Mon Sep 17 00:00:00 2001
From: Matthias
Date: Sat, 5 Nov 2022 13:14:35 +0100
Subject: [PATCH 2/3] Properly handle and test ohlcv min_max with empty files

---
 freqtrade/data/history/idatahandler.py |  5 +++++
 tests/data/test_datahandler.py         | 18 ++++++++++++++++++
 2 files changed, 23 insertions(+)

diff --git a/freqtrade/data/history/idatahandler.py b/freqtrade/data/history/idatahandler.py
index cbc3f1a34..b82d2055b 100644
--- a/freqtrade/data/history/idatahandler.py
+++ b/freqtrade/data/history/idatahandler.py
@@ -102,6 +102,11 @@ class IDataHandler(ABC):
         :return: (min, max)
         """
         data = self._ohlcv_load(pair, timeframe, None, candle_type)
+        if data.empty:
+            return (
+                datetime.fromtimestamp(0, tz=timezone.utc),
+                datetime.fromtimestamp(0, tz=timezone.utc)
+            )
         return data.iloc[0]['date'].to_pydatetime(), data.iloc[-1]['date'].to_pydatetime()
 
     @abstractmethod
diff --git a/tests/data/test_datahandler.py b/tests/data/test_datahandler.py
index 67eeda7d0..c067d0339 100644
--- a/tests/data/test_datahandler.py
+++ b/tests/data/test_datahandler.py
@@ -1,6 +1,7 @@
 # pragma pylint: disable=missing-docstring, protected-access, C0103
 
 import re
+from datetime import datetime, timezone
 from pathlib import Path
 from unittest.mock import MagicMock
 
@@ -154,6 +155,23 @@ def test_jsondatahandler_ohlcv_load(testdatadir, caplog):
     assert df.columns.equals(df1.columns)
 
 
+def test_datahandler_ohlcv_data_min_max(testdatadir):
+    dh = JsonDataHandler(testdatadir)
+    min_max = dh.ohlcv_data_min_max('UNITTEST/BTC', '5m', 'spot')
+    assert len(min_max) == 2
+
+    # Empty pair
+    min_max = dh.ohlcv_data_min_max('UNITTEST/BTC', '8m', 'spot')
+    assert len(min_max) == 2
+    assert min_max[0] == datetime.fromtimestamp(0, tz=timezone.utc)
+    assert min_max[0] == min_max[1]
+    # Empty pair2
+    min_max = dh.ohlcv_data_min_max('NOPAIR/XXX', '4m', 'spot')
+    assert len(min_max) == 2
+    assert min_max[0] == datetime.fromtimestamp(0, tz=timezone.utc)
+    assert min_max[0] == min_max[1]
+
+
 def test_datahandler__check_empty_df(testdatadir, caplog):
     dh = JsonDataHandler(testdatadir)
     expected_text = r"Price jump in UNITTEST/USDT, 1h, spot between"

From 25b8d34fe2ac1d95dadc1a25e2f5dcf0bdc34aa8 Mon Sep 17 00:00:00 2001
From: Matthias
Date: Sat, 5 Nov 2022 17:02:18 +0100
Subject: [PATCH 3/3] Update backtesting test

Had bad behavior before, and didn't properly test what it was supposed to

---
 tests/optimize/test_backtesting.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/tests/optimize/test_backtesting.py b/tests/optimize/test_backtesting.py
index 290e08455..140cc3394 100644
--- a/tests/optimize/test_backtesting.py
+++ b/tests/optimize/test_backtesting.py
@@ -788,13 +788,14 @@ def test_backtest_one(default_conf, fee, mocker, testdatadir) -> None:
         assert len(t['orders']) == 2
         ln = data_pair.loc[data_pair["date"] == t["open_date"]]
         # Check open trade rate alignes to open rate
-        assert ln is not None
+        assert not ln.empty
         assert round(ln.iloc[0]["open"], 6) == round(t["open_rate"], 6)
         # check close trade rate alignes to close rate or is between high and low
-        ln = data_pair.loc[data_pair["date"] == t["close_date"]]
-        assert (round(ln.iloc[0]["open"], 6) == round(t["close_rate"], 6) or
-                round(ln.iloc[0]["low"], 6) < round(
-                    t["close_rate"], 6) < round(ln.iloc[0]["high"], 6))
+        ln1 = data_pair.loc[data_pair["date"] == t["close_date"]]
+        assert not ln1.empty
+        assert (round(ln1.iloc[0]["open"], 6) == round(t["close_rate"], 6) or
+                round(ln1.iloc[0]["low"], 6) < round(
+                    t["close_rate"], 6) < round(ln1.iloc[0]["high"], 6))
 
 
 def test_backtest_timedout_entry_orders(default_conf, fee, mocker, testdatadir) -> None:
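A note on the behavior the last patch corrects, shown as a standalone sketch with made-up data (not freqtrade code): filtering a DataFrame with `.loc` and a boolean mask returns an empty DataFrame rather than `None` when nothing matches, so the old `assert ln is not None` could never fail. Checking `.empty`, as both this patch and the `ohlcv_data_min_max` guard in PATCH 2 do, is what actually detects a missing row.

```python
import pandas as pd

df = pd.DataFrame({
    "date": pd.to_datetime(["2022-11-05 12:00", "2022-11-05 13:00"]),
    "open": [1.0, 2.0],
})

# A timestamp that is not present in the frame:
missing = pd.Timestamp("2022-11-05 14:00")
ln = df.loc[df["date"] == missing]

assert ln is not None   # always passes: an empty DataFrame is still not None
assert ln.empty         # this is the check that actually detects the miss
# ln.iloc[0] would raise IndexError on the empty frame, which is why the
# updated test asserts `not ln.empty` before comparing rates.
```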