Mirror of https://github.com/freqtrade/freqtrade.git (synced 2024-11-10 10:21:59 +00:00)

Merge branch 'develop' into dependabot/pip/develop/numpy-1.25.0

This commit is contained in: commit 2e78f7503e

.github/workflows/ci.yml (vendored)
@@ -160,7 +160,8 @@ jobs:
     - name: Installation - macOS
       if: runner.os == 'macOS'
       run: |
-        brew update
+        # brew update
+        # TODO: Should be the brew upgrade
         # homebrew fails to update python due to unlinking failures
         # https://github.com/actions/runner-images/issues/6817
         rm /usr/local/bin/2to3 || true
@@ -460,7 +461,7 @@ jobs:
         python setup.py sdist bdist_wheel

     - name: Publish to PyPI (Test)
-      uses: pypa/gh-action-pypi-publish@v1.8.6
+      uses: pypa/gh-action-pypi-publish@v1.8.7
       if: (github.event_name == 'release')
       with:
         user: __token__

@@ -468,7 +469,7 @@ jobs:
         repository_url: https://test.pypi.org/legacy/

     - name: Publish to PyPI
-      uses: pypa/gh-action-pypi-publish@v1.8.6
+      uses: pypa/gh-action-pypi-publish@v1.8.7
       if: (github.event_name == 'release')
       with:
         user: __token__
@@ -18,7 +18,7 @@ repos:
        - types-requests==2.31.0.1
        - types-tabulate==0.9.0.2
        - types-python-dateutil==2.8.19.13
-       - SQLAlchemy==2.0.16
+       - SQLAlchemy==2.0.17
        # stages: [push]

  - repo: https://github.com/pycqa/isort
@@ -136,7 +136,7 @@ class MyAwesomeStrategy(IStrategy):

 ### Dynamic parameters

-Parameters can also be defined dynamically, but must be available to the instance once the * [`bot_start()` callback](strategy-callbacks.md#bot-start) has been called.
+Parameters can also be defined dynamically, but must be available to the instance once the [`bot_start()` callback](strategy-callbacks.md#bot-start) has been called.

 ``` python
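The example block itself is cut off by the diff context here. As a rough sketch of what a dynamically defined parameter can look like (the `buy_adx` name and its bounds are illustrative, not taken from this diff):

```python
# Hedged sketch, assuming the standard freqtrade strategy interface.
from freqtrade.strategy import IStrategy, IntParameter


class MyAwesomeStrategy(IStrategy):
    def bot_start(self, **kwargs) -> None:
        # Defined on the instance rather than at class level; it becomes
        # available once bot_start() has been called, as the text notes.
        self.buy_adx = IntParameter(20, 30, default=25, space="buy")
```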
@@ -6,7 +6,7 @@ To download data (candles / OHLCV) needed for backtesting and hyperoptimization

 If no additional parameter is specified, freqtrade will download data for `"1m"` and `"5m"` timeframes for the last 30 days.
 Exchange and pairs will come from `config.json` (if specified using `-c/--config`).
-Otherwise `--exchange` becomes mandatory.
+Without provided configuration, `--exchange` becomes mandatory.

 You can use a relative timerange (`--days 20`) or an absolute starting point (`--timerange 20200101-`). For incremental downloads, the relative approach should be used.
@@ -83,40 +83,47 @@ Common arguments:

 ```

+!!! Tip "Downloading all data for one quote currency"
+    Often, you'll want to download data for all pairs of a specific quote-currency. In such cases, you can use the following shorthand:
+    `freqtrade download-data --exchange binance --pairs .*/USDT <...>`. The provided "pairs" string will be expanded to contain all active pairs on the exchange.
+    To also download data for inactive (delisted) pairs, add `--include-inactive-pairs` to the command.
+
 !!! Note "Startup period"
     `download-data` is a strategy-independent command. The idea is to download a big chunk of data once, and then iteratively increase the amount of data stored.

     For that reason, `download-data` does not care about the "startup-period" defined in a strategy. It's up to the user to download additional days if the backtest should start at a specific point in time (while respecting startup period).

-### Pairs file
+### Start download

-In alternative to the whitelist from `config.json`, a `pairs.json` file can be used.
-If you are using Binance for example:
-
-- create a directory `user_data/data/binance` and copy or create the `pairs.json` file in that directory.
-- update the `pairs.json` file to contain the currency pairs you are interested in.
+A very simple command (assuming an available `config.json` file) can look as follows.

 ```bash
-mkdir -p user_data/data/binance
-touch user_data/data/binance/pairs.json
+freqtrade download-data --exchange binance
 ```

-The format of the `pairs.json` file is a simple json list.
-Mixing different stake-currencies is allowed for this file, since it's only used for downloading.
-
-``` json
-[
-    "ETH/BTC",
-    "ETH/USDT",
-    "BTC/USDT",
-    "XRP/ETH"
-]
-```
-
-!!! Tip "Downloading all data for one quote currency"
-    Often, you'll want to download data for all pairs of a specific quote-currency. In such cases, you can use the following shorthand:
-    `freqtrade download-data --exchange binance --pairs .*/USDT <...>`. The provided "pairs" string will be expanded to contain all active pairs on the exchange.
-    To also download data for inactive (delisted) pairs, add `--include-inactive-pairs` to the command.
+This will download historical candle (OHLCV) data for all the currency pairs defined in the configuration.
+
+Alternatively, specify the pairs directly
+
+```bash
+freqtrade download-data --exchange binance --pairs ETH/USDT XRP/USDT BTC/USDT
+```
+
+or as regex (in this case, to download all active USDT pairs)
+
+```bash
+freqtrade download-data --exchange binance --pairs .*/USDT
+```
+
+### Other Notes
+
+* To use a different directory than the exchange specific default, use `--datadir user_data/data/some_directory`.
+* To change the exchange used to download the historical data from, please use a different configuration file (you'll probably need to adjust rate limits etc.)
+* To use `pairs.json` from some other directory, use `--pairs-file some_other_dir/pairs.json`.
+* To download historical candle (OHLCV) data for only 10 days, use `--days 10` (defaults to 30 days).
+* To download historical candle (OHLCV) data from a fixed starting point, use `--timerange 20200101-` - which will download all data from January 1st, 2020.
+* Use `--timeframes` to specify which timeframes to download the historical candle (OHLCV) data for. Default is `--timeframes 1m 5m` which will download 1-minute and 5-minute data.
+* To use exchange, timeframe and list of pairs as defined in your configuration file, use the `-c/--config` option. With this, the script uses the whitelist defined in the config as the list of currency pairs to download data for and does not require the pairs.json file. You can combine `-c/--config` with most other options.

 ??? Note "Permission denied errors"
     If your configuration directory `user_data` was made by docker, you may get the following error:
@@ -131,39 +138,7 @@ Mixing different stake-currencies is allowed for this file, since it's only used
     sudo chown -R $UID:$GID user_data
     ```

-### Start download
-
-Then run:
-
-```bash
-freqtrade download-data --exchange binance
-```
-
-This will download historical candle (OHLCV) data for all the currency pairs you defined in `pairs.json`.
-
-Alternatively, specify the pairs directly
-
-```bash
-freqtrade download-data --exchange binance --pairs ETH/USDT XRP/USDT BTC/USDT
-```
-
-or as regex (to download all active USDT pairs)
-
-```bash
-freqtrade download-data --exchange binance --pairs .*/USDT
-```
-
-### Other Notes
-
-- To use a different directory than the exchange specific default, use `--datadir user_data/data/some_directory`.
-- To change the exchange used to download the historical data from, please use a different configuration file (you'll probably need to adjust rate limits etc.)
-- To use `pairs.json` from some other directory, use `--pairs-file some_other_dir/pairs.json`.
-- To download historical candle (OHLCV) data for only 10 days, use `--days 10` (defaults to 30 days).
-- To download historical candle (OHLCV) data from a fixed starting point, use `--timerange 20200101-` - which will download all data from January 1st, 2020.
-- Use `--timeframes` to specify what timeframe download the historical candle (OHLCV) data for. Default is `--timeframes 1m 5m` which will download 1-minute and 5-minute data.
-- To use exchange, timeframe and list of pairs as defined in your configuration file, use the `-c/--config` option. With this, the script uses the whitelist defined in the config as the list of currency pairs to download data for and does not require the pairs.json file. You can combine `-c/--config` with most other options.
-
-#### Download additional data before the current timerange
+### Download additional data before the current timerange

 Assuming you downloaded all data from 2022 (`--timerange 20220101-`) - but you'd now like to also backtest with earlier data.
 You can do so by using the `--prepend` flag, combined with `--timerange` - specifying an end-date.
@@ -238,7 +213,36 @@ Size has been taken from the BTC/USDT 1m spot combination for the timerange spec

 To have the best performance/size mix, we recommend the use of either feather or parquet.

-#### Sub-command convert data
+### Pairs file
+
+As an alternative to the whitelist from `config.json`, a `pairs.json` file can be used.
+If you are using Binance for example:
+
+* create a directory `user_data/data/binance` and copy or create the `pairs.json` file in that directory.
+* update the `pairs.json` file to contain the currency pairs you are interested in.
+
+```bash
+mkdir -p user_data/data/binance
+touch user_data/data/binance/pairs.json
+```
+
+The format of the `pairs.json` file is a simple json list.
+Mixing different stake-currencies is allowed for this file, since it's only used for downloading.
+
+``` json
+[
+    "ETH/BTC",
+    "ETH/USDT",
+    "BTC/USDT",
+    "XRP/ETH"
+]
+```
+
+!!! Note
+    The `pairs.json` file is only used when no configuration is loaded (implicitly by naming, or via the `--config` flag).
+    You can force the usage of this file via `--pairs-file pairs.json` - however we recommend using the pairlist from within the configuration, either via the `exchange.pair_whitelist` or `pairs` setting in the configuration.
+
+## Sub-command convert data

 ```
 usage: freqtrade convert-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
@@ -290,7 +294,7 @@ Common arguments:

 ```

-##### Example converting data
+### Example converting data

 The following command will convert all candle (OHLCV) data available in `~/.freqtrade/data/binance` from json to jsongz, saving diskspace in the process.
 It'll also remove original json data files (`--erase` parameter).
@@ -299,7 +303,7 @@ It'll also remove original json data files (`--erase` parameter).
 freqtrade convert-data --format-from json --format-to jsongz --datadir ~/.freqtrade/data/binance -t 5m 15m --erase
 ```

-#### Sub-command convert trade data
+## Sub-command convert trade data

 ```
 usage: freqtrade convert-trade-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
@@ -342,7 +346,7 @@ Common arguments:

 ```

-##### Example converting trades
+### Example converting trades

 The following command will convert all available trade-data in `~/.freqtrade/data/kraken` from jsongz to json.
 It'll also remove original jsongz data files (`--erase` parameter).
@@ -351,7 +355,7 @@ It'll also remove original jsongz data files (`--erase` parameter).
 freqtrade convert-trade-data --format-from jsongz --format-to json --datadir ~/.freqtrade/data/kraken --erase
 ```

-### Sub-command trades to ohlcv
+## Sub-command trades to ohlcv

 When you need to use `--dl-trades` (kraken only) to download data, conversion of trades data to ohlcv data is the last step.
 This command will allow you to repeat this last step for additional timeframes without re-downloading the data.
@@ -400,13 +404,13 @@ Common arguments:

 ```

-#### Example trade-to-ohlcv conversion
+### Example trade-to-ohlcv conversion

 ``` bash
 freqtrade trades-to-ohlcv --exchange kraken -t 5m 1h 1d --pairs BTC/EUR ETH/EUR
 ```

-### Sub-command list-data
+## Sub-command list-data

 You can get a list of downloaded data using the `list-data` sub-command.
@@ -451,7 +455,7 @@ Common arguments:

 ```

-#### Example list-data
+### Example list-data

 ```bash
 > freqtrade list-data --userdir ~/.freqtrade/user_data/
@@ -465,7 +469,7 @@ ETH/BTC 5m, 15m, 30m, 1h, 2h, 4h, 6h, 12h, 1d
 ETH/USDT 5m, 15m, 30m, 1h, 2h, 4h
 ```

-### Trades (tick) data
+## Trades (tick) data

 By default, `download-data` sub-command downloads Candles (OHLCV) data. Some exchanges also provide historic trade-data via their API.
 This data can be useful if you need many different timeframes, since it is only downloaded once, and then resampled locally to the desired timeframes.
@@ -453,7 +453,13 @@ Once the PR against stable is merged (best right after merging):

 * Use the button "Draft a new release" in the Github UI (subsection releases).
 * Use the version-number specified as tag.
 * Use "stable" as reference (this step comes after the above PR is merged).
-* Use the above changelog as release comment (as codeblock)
+* Use the above changelog as release comment (as codeblock).
+* Use the below snippet for the new release
+
+??? Tip "Release template"
+    ````
+    --8<-- "includes/release_template.md"
+    ````

 ## Releases
@@ -160,7 +160,7 @@ Below are the values you can expect to include/use inside a typical strategy dat

 |------------|-------------|
 | `df['&*']` | Any dataframe column prepended with `&` in `set_freqai_targets()` is treated as a training target (label) inside FreqAI (typically following the naming convention `&-s*`). For example, to predict the close price 40 candles into the future, you would set `df['&-s_close'] = df['close'].shift(-self.freqai_info["feature_parameters"]["label_period_candles"])` with `"label_period_candles": 40` in the config. FreqAI makes the predictions and gives them back under the same key (`df['&-s_close']`) to be used in `populate_entry/exit_trend()`. <br> **Datatype:** Depends on the output of the model. |
 | `df['&*_std/mean']` | Standard deviation and mean values of the defined labels during training (or live tracking with `fit_live_predictions_candles`). Commonly used to understand the rarity of a prediction (use the z-score as shown in `templates/FreqaiExampleStrategy.py` and explained [here](#creating-a-dynamic-target-threshold) to evaluate how often a particular prediction was observed during training or historically with `fit_live_predictions_candles`). <br> **Datatype:** Float. |
-| `df['do_predict']` | Indication of an outlier data point. The return value is integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers()` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM considers the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`. <br> **Datatype:** Integer between -2 and 2. |
+| `df['do_predict']` | Indication of an outlier data point. The return value is an integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM consider the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`. <br> **Datatype:** Integer between -2 and 2. |
 | `df['DI_values']` | Dissimilarity Index (DI) values are proxies for the level of confidence FreqAI has in the prediction. A lower DI means the prediction is close to the training data, i.e., higher prediction confidence. See details about the DI [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Float. |
 | `df['%*']` | Any dataframe column prepended with `%` in `feature_engineering_*()` is treated as a training feature. For example, you can include the RSI in the training feature set (similar to in `templates/FreqaiExampleStrategy.py`) by setting `df['%-rsi']`. See more details on how this is done [here](freqai-feature-engineering.md). <br> **Note:** Since the number of features prepended with `%` can multiply very quickly (10s of thousands of features are easily engineered using the multiplicative functionality of, e.g., `include_shifted_candles` and `include_timeframes` as described in the [parameter table](freqai-parameter-table.md)), these features are removed from the dataframe that is returned from FreqAI to the strategy. To keep a particular type of feature for plotting purposes, you would prepend it with `%%`. <br> **Datatype:** Depends on the output of the model. |
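As a hedged illustration of how these columns are typically consumed in a strategy (a minimal sketch; the label name `&-s_close` follows the table's naming convention and the 0.01 threshold is made up):

```python
import pandas as pd


def mark_entries(df: pd.DataFrame) -> pd.DataFrame:
    # Act on FreqAI predictions only where the data point was not flagged
    # as an outlier (do_predict == 1), per the table above.
    df.loc[
        (df["do_predict"] == 1)
        & (df["&-s_close"] > 0.01),  # illustrative entry condition
        "enter_long",
    ] = 1
    return df
```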
docs/includes/release_template.md (new file, 37 lines)

@@ -0,0 +1,37 @@
## Highlighted changes

- ...

### How to update

As always, you can update your bot using one of the following commands:

#### docker-compose

```bash
docker-compose pull
docker-compose up -d
```

#### Installation via setup script

```
# Deactivate venv and run
./setup.sh --update
```

#### Plain native installation

```
git pull
pip install -U -r requirements.txt
```

<details>
<summary>Expand full changelog</summary>

```
<Paste your changelog here>
```

</details>
@@ -1,6 +1,6 @@
 markdown==3.3.7
 mkdocs==1.4.3
-mkdocs-material==9.1.16
+mkdocs-material==9.1.17
 mdx_truly_sane_lists==1.3
 pymdown-extensions==10.0.1
 jinja2==3.1.2
@@ -800,8 +800,8 @@ class MyCoolFreqaiModel(BaseRegressionModel):
             if self.freqai_info.get("DI_threshold", 0) > 0:
                 dk.DI_values = dk.feature_pipeline["di"].di_values
             else:
-                dk.DI_values = np.zeros(len(outliers.index))
-            dk.do_predict = outliers.to_numpy()
+                dk.DI_values = np.zeros(outliers.shape[0])
+            dk.do_predict = outliers

             # ... your custom code
             return (pred_df, dk.do_predict)
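The same two-line substitution recurs in the base models further down. It reads as `outliers` now arriving as a NumPy array rather than a pandas Series, in which case `.to_numpy()` becomes redundant and `len(outliers.index)` gives way to `outliers.shape[0]` - a small equivalence sketch with stand-in data, not freqtrade code:

```python
import numpy as np
import pandas as pd

outliers_series = pd.Series([1, 0, 1])
outliers_array = outliers_series.to_numpy()

# For a Series, len(s.index) == s.shape[0]; an ndarray only offers shape[0].
assert len(outliers_series.index) == outliers_array.shape[0] == 3
```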
@@ -1,5 +1,5 @@
 """ Freqtrade bot """
-__version__ = '2023.6.dev'
+__version__ = '2023.7.dev'

 if 'dev' in __version__:
     from pathlib import Path
@@ -33,11 +33,11 @@ def start_list_exchanges(args: Dict[str, Any]) -> None:
     else:
         headers = {
             'name': 'Exchange name',
-            'valid': 'Valid',
             'supported': 'Supported',
             'trade_modes': 'Markets',
             'comment': 'Reason',
         }
+        headers.update({'valid': 'Valid'} if args['list_exchanges_all'] else {})

         def build_entry(exchange: ValidExchangesType, valid: bool):
             valid_entry = {'valid': exchange['valid']} if valid else {}
@@ -568,6 +568,7 @@ class Configuration:
         # Fall back to /dl_path/pairs.json
         pairs_file = config['datadir'] / 'pairs.json'
         if pairs_file.exists():
             logger.info(f'Reading pairs file "{pairs_file}".')
             config['pairs'] = load_file(pairs_file)
+            if 'pairs' in config and isinstance(config['pairs'], list):
                 config['pairs'].sort()
@@ -6,6 +6,8 @@ import re
 from datetime import datetime, timezone
 from typing import Optional

+from typing_extensions import Self
+
 from freqtrade.constants import DATETIME_PRINT_FORMAT
 from freqtrade.exceptions import OperationalException
@@ -107,15 +109,15 @@ class TimeRange:
         self.startts = int(min_date.timestamp() + timeframe_secs * startup_candles)
         self.starttype = 'date'

-    @staticmethod
-    def parse_timerange(text: Optional[str]) -> 'TimeRange':
+    @classmethod
+    def parse_timerange(cls, text: Optional[str]) -> Self:
         """
         Parse the value of the argument --timerange to determine what is the range desired
         :param text: value from --timerange
         :return: Start and End range period
         """
         if not text:
-            return TimeRange(None, None, 0, 0)
+            return cls(None, None, 0, 0)
         syntax = [(r'^-(\d{8})$', (None, 'date')),
                   (r'^(\d{8})-$', ('date', None)),
                   (r'^(\d{8})-(\d{8})$', ('date', 'date')),
@@ -156,5 +158,5 @@ class TimeRange:
             if start > stop > 0:
                 raise OperationalException(
                     f'Start date is after stop date for timerange "{text}"')
-            return TimeRange(stype[0], stype[1], start, stop)
+            return cls(stype[0], stype[1], start, stop)
         raise OperationalException(f'Incorrect syntax for timerange "{text}"')
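A brief sketch of what the `@classmethod`/`Self` form buys (the subclass is hypothetical, not part of this diff): `parse_timerange` now builds instances via `cls(...)`, so subclasses get back their own type:

```python
from freqtrade.configuration import TimeRange


class MyTimeRange(TimeRange):  # hypothetical subclass
    pass


tr = MyTimeRange.parse_timerange("20200101-20210101")
# Holds with the new cls(...) construction; the old hard-coded
# TimeRange(...) would have returned a plain TimeRange instead.
assert isinstance(tr, MyTimeRange)
```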
@@ -112,6 +112,8 @@ MINIMAL_CONFIG = {
     }
 }

+__MESSAGE_TYPE_DICT: Dict[str, Dict[str, str]] = {x: {'type': 'object'} for x in RPCMessageType}
+
 # Required json-schema for user specified config
 CONF_SCHEMA = {
     'type': 'object',
@@ -354,7 +356,8 @@ CONF_SCHEMA = {
                 'format': {'type': 'string', 'enum': WEBHOOK_FORMAT_OPTIONS, 'default': 'form'},
                 'retries': {'type': 'integer', 'minimum': 0},
                 'retry_delay': {'type': 'number', 'minimum': 0},
-                **dict([(x, {'type': 'object'}) for x in RPCMessageType]),
+                **__MESSAGE_TYPE_DICT,
+                # **{x: {'type': 'object'} for x in RPCMessageType},
                 # Below -> Deprecated
                 'webhookentry': {'type': 'object'},
                 'webhookentrycancel': {'type': 'object'},
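The precomputed `__MESSAGE_TYPE_DICT` is spread into the schema in place of the old inline `dict([...])`; the two constructions are functionally identical, as this stand-alone sketch (with a stand-in enum, since `RPCMessageType` lives in `freqtrade.enums`) suggests:

```python
from enum import Enum


class DemoMessageType(str, Enum):  # stand-in for RPCMessageType
    STATUS = "status"
    ENTRY = "entry"


old_style = dict([(x, {'type': 'object'}) for x in DemoMessageType])
new_style = {x: {'type': 'object'} for x in DemoMessageType}
assert old_style == new_style
```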
@@ -14,8 +14,8 @@ from freqtrade.data.history.idatahandler import IDataHandler, get_datahandler
 from freqtrade.enums import CandleType
 from freqtrade.exceptions import OperationalException
 from freqtrade.exchange import Exchange
-from freqtrade.misc import format_ms_time
 from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist
+from freqtrade.util import format_ms_time
 from freqtrade.util.binance_mig import migrate_binance_futures_data
@@ -354,7 +354,7 @@ def _download_trades_history(exchange: Exchange,
         trades = []

     if not since:
-        since = int((datetime.now() - timedelta(days=-new_pairs_days)).timestamp()) * 1000
+        since = int((datetime.now() - timedelta(days=new_pairs_days)).timestamp()) * 1000

     from_id = trades[-1][1] if trades else None
     if trades and since < trades[-1][0]:
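This one-character fix flips a sign bug: with `days=-new_pairs_days`, the fallback `since` pointed roughly `new_pairs_days` into the future, so no trade history would be fetched. A small sketch of the difference (the value 30 is illustrative):

```python
from datetime import datetime, timedelta

new_pairs_days = 30  # illustrative value
buggy = int((datetime.now() - timedelta(days=-new_pairs_days)).timestamp()) * 1000
fixed = int((datetime.now() - timedelta(days=new_pairs_days)).timestamp()) * 1000

# The buggy timestamp lies ~30 days in the future, the fixed one ~30 days back.
assert buggy > fixed
```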
@@ -120,7 +120,7 @@ class BaseClassifierModel(IFreqaiModel):
         if dk.feature_pipeline["di"]:
             dk.DI_values = dk.feature_pipeline["di"].di_values
         else:
-            dk.DI_values = np.zeros(len(outliers.index))
-        dk.do_predict = outliers.to_numpy()
+            dk.DI_values = np.zeros(outliers.shape[0])
+        dk.do_predict = outliers

         return (pred_df, dk.do_predict)
@@ -94,8 +94,8 @@ class BasePyTorchClassifier(BasePyTorchModel):
         if dk.feature_pipeline["di"]:
             dk.DI_values = dk.feature_pipeline["di"].di_values
         else:
-            dk.DI_values = np.zeros(len(outliers.index))
-        dk.do_predict = outliers.to_numpy()
+            dk.DI_values = np.zeros(outliers.shape[0])
+        dk.do_predict = outliers

         return (pred_df, dk.do_predict)
@@ -55,8 +55,8 @@ class BasePyTorchRegressor(BasePyTorchModel):
         if dk.feature_pipeline["di"]:
             dk.DI_values = dk.feature_pipeline["di"].di_values
         else:
-            dk.DI_values = np.zeros(len(outliers.index))
-        dk.do_predict = outliers.to_numpy()
+            dk.DI_values = np.zeros(outliers.shape[0])
+        dk.do_predict = outliers
         return (pred_df, dk.do_predict)

     def train(
@@ -114,7 +114,7 @@ class BaseRegressionModel(IFreqaiModel):
         if dk.feature_pipeline["di"]:
             dk.DI_values = dk.feature_pipeline["di"].di_values
         else:
-            dk.DI_values = np.zeros(len(outliers.index))
-        dk.do_predict = outliers.to_numpy()
+            dk.DI_values = np.zeros(outliers.shape[0])
+        dk.do_predict = outliers

         return (pred_df, dk.do_predict)
@@ -515,7 +515,7 @@ class IFreqaiModel(ABC):
         ]

         if ft_params.get("principal_component_analysis", False):
-            pipe_steps.append(('pca', ds.PCA()))
+            pipe_steps.append(('pca', ds.PCA(n_components=0.999)))
             pipe_steps.append(('post-pca-scaler',
                                SKLearnWrapper(MinMaxScaler(feature_range=(-1, 1)))))
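Assuming `ds.PCA` follows scikit-learn semantics here, a float `n_components` in (0, 1) keeps just enough components to explain that fraction of variance rather than a fixed count:

```python
import numpy as np
from sklearn.decomposition import PCA

X = np.random.default_rng(0).normal(size=(200, 10))

# n_components=0.999 -> smallest number of components whose cumulative
# explained variance reaches 99.9% of the total.
pca = PCA(n_components=0.999).fit(X)
assert pca.explained_variance_ratio_.sum() >= 0.999
```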
@@ -1012,6 +1012,6 @@ class IFreqaiModel(ABC):
         if self.freqai_info.get("DI_threshold", 0) > 0:
             dk.DI_values = dk.feature_pipeline["di"].di_values
         else:
-            dk.DI_values = np.zeros(len(outliers.index))
-        dk.do_predict = outliers.to_numpy()
+            dk.DI_values = np.zeros(outliers.shape[0])
+        dk.do_predict = outliers
         return
@@ -136,8 +136,8 @@ class PyTorchTransformerRegressor(BasePyTorchRegressor):
         if self.freqai_info.get("DI_threshold", 0) > 0:
             dk.DI_values = dk.feature_pipeline["di"].di_values
         else:
-            dk.DI_values = np.zeros(len(outliers.index))
-        dk.do_predict = outliers.to_numpy()
+            dk.DI_values = np.zeros(outliers.shape[0])
+        dk.do_predict = outliers

         if x.shape[1] > 1:
             zeros_df = pd.DataFrame(np.zeros((x.shape[1] - len(pred_df), len(pred_df.columns))),
@@ -3,7 +3,6 @@ Various tool function for Freqtrade and scripts
 """
 import gzip
 import logging
-from datetime import datetime
 from pathlib import Path
 from typing import Any, Dict, Iterator, List, Mapping, Optional, TextIO, Union
 from urllib.parse import urlparse
@@ -123,14 +122,6 @@ def pair_to_filename(pair: str) -> str:
     return pair


-def format_ms_time(date: int) -> str:
-    """
-    convert MS date to readable format.
-    : epoch-string in ms
-    """
-    return datetime.fromtimestamp(date / 1000.0).strftime('%Y-%m-%dT%H:%M:%S')
-
-
 def deep_merge_dicts(source, destination, allow_null_overrides: bool = True):
     """
     Values from Source override destination, destination is returned (and modified!!)
@@ -3,7 +3,7 @@ from typing import Callable
 from cachetools import TTLCache, cached


-class LoggingMixin():
+class LoggingMixin:
     """
     Logging Mixin
     Shows similar messages only once every `refresh_period`.
@@ -35,7 +35,7 @@ def hyperopt_serializer(x):
     return str(x)


-class HyperoptStateContainer():
+class HyperoptStateContainer:
     """ Singleton class to track state of hyperopt"""
     state: HyperoptState = HyperoptState.OPTIMIZE
@@ -44,7 +44,7 @@ class HyperoptStateContainer():
         cls.state = value


-class HyperoptTools():
+class HyperoptTools:

     @staticmethod
     def get_strategy_filename(config: Config, strategy_name: str) -> Optional[Path]:
freqtrade/optimize/optimize_reports/__init__.py (new file, 18 lines)

@@ -0,0 +1,18 @@
# flake8: noqa: F401
from freqtrade.optimize.optimize_reports.bt_output import (generate_edge_table,
                                                           show_backtest_result,
                                                           show_backtest_results,
                                                           show_sorted_pairlist,
                                                           text_table_add_metrics,
                                                           text_table_bt_results,
                                                           text_table_exit_reason,
                                                           text_table_periodic_breakdown,
                                                           text_table_strategy, text_table_tags)
from freqtrade.optimize.optimize_reports.bt_storage import (store_backtest_analysis_results,
                                                            store_backtest_stats)
from freqtrade.optimize.optimize_reports.optimize_reports import (
    generate_all_periodic_breakdown_stats, generate_backtest_stats, generate_daily_stats,
    generate_exit_reason_stats, generate_pair_metrics, generate_periodic_breakdown_stats,
    generate_rejected_signals, generate_strategy_comparison, generate_strategy_stats,
    generate_tag_metrics, generate_trade_signal_candles, generate_trading_stats,
    generate_wins_draws_losses)
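The `# flake8: noqa: F401` pragma keeps these re-exports from being flagged as unused imports; callers keep importing from the package root after the split, e.g.:

```python
# Resolves via the re-exports in the __init__.py shown above.
from freqtrade.optimize.optimize_reports import (show_backtest_results,
                                                 store_backtest_stats)
```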
freqtrade/optimize/optimize_reports/bt_output.py (new file, 405 lines)

@@ -0,0 +1,405 @@
import logging
from typing import Any, Dict, List

from tabulate import tabulate

from freqtrade.constants import UNLIMITED_STAKE_AMOUNT, Config
from freqtrade.misc import decimals_per_coin, round_coin_value
from freqtrade.optimize.optimize_reports.optimize_reports import (generate_periodic_breakdown_stats,
                                                                  generate_wins_draws_losses)


logger = logging.getLogger(__name__)


def _get_line_floatfmt(stake_currency: str) -> List[str]:
    """
    Generate floatformat (goes in line with _generate_result_line())
    """
    return ['s', 'd', '.2f', '.2f', f'.{decimals_per_coin(stake_currency)}f',
            '.2f', 'd', 's', 's']


def _get_line_header(first_column: str, stake_currency: str,
                     direction: str = 'Entries') -> List[str]:
    """
    Generate header lines (goes in line with _generate_result_line())
    """
    return [first_column, direction, 'Avg Profit %', 'Cum Profit %',
            f'Tot Profit {stake_currency}', 'Tot Profit %', 'Avg Duration',
            'Win Draw Loss Win%']


def text_table_bt_results(pair_results: List[Dict[str, Any]], stake_currency: str) -> str:
    """
    Generates and returns a text table for the given backtest data and the results dataframe
    :param pair_results: List of Dictionaries - one entry per pair + final TOTAL row
    :param stake_currency: stake-currency - used to correctly name headers
    :return: pretty printed table with tabulate as string
    """

    headers = _get_line_header('Pair', stake_currency)
    floatfmt = _get_line_floatfmt(stake_currency)
    output = [[
        t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
        t['profit_total_pct'], t['duration_avg'],
        generate_wins_draws_losses(t['wins'], t['draws'], t['losses'])
    ] for t in pair_results]
    # Ignore type as floatfmt does allow tuples but mypy does not know that
    return tabulate(output, headers=headers,
                    floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")


def text_table_exit_reason(exit_reason_stats: List[Dict[str, Any]], stake_currency: str) -> str:
    """
    Generate small table outlining Backtest results
    :param sell_reason_stats: Exit reason metrics
    :param stake_currency: Stakecurrency used
    :return: pretty printed table with tabulate as string
    """
    headers = [
        'Exit Reason',
        'Exits',
        'Win Draws Loss Win%',
        'Avg Profit %',
        'Cum Profit %',
        f'Tot Profit {stake_currency}',
        'Tot Profit %',
    ]

    output = [[
        t.get('exit_reason', t.get('sell_reason')), t['trades'],
        generate_wins_draws_losses(t['wins'], t['draws'], t['losses']),
        t['profit_mean_pct'], t['profit_sum_pct'],
        round_coin_value(t['profit_total_abs'], stake_currency, False),
        t['profit_total_pct'],
    ] for t in exit_reason_stats]
    return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")


def text_table_tags(tag_type: str, tag_results: List[Dict[str, Any]], stake_currency: str) -> str:
    """
    Generates and returns a text table for the given backtest data and the results dataframe
    :param pair_results: List of Dictionaries - one entry per pair + final TOTAL row
    :param stake_currency: stake-currency - used to correctly name headers
    :return: pretty printed table with tabulate as string
    """
    if (tag_type == "enter_tag"):
        headers = _get_line_header("TAG", stake_currency)
    else:
        headers = _get_line_header("TAG", stake_currency, 'Exits')
    floatfmt = _get_line_floatfmt(stake_currency)
    output = [
        [
            t['key'] if t['key'] is not None and len(
                t['key']) > 0 else "OTHER",
            t['trades'],
            t['profit_mean_pct'],
            t['profit_sum_pct'],
            t['profit_total_abs'],
            t['profit_total_pct'],
            t['duration_avg'],
            generate_wins_draws_losses(
                t['wins'],
                t['draws'],
                t['losses'])] for t in tag_results]
    # Ignore type as floatfmt does allow tuples but mypy does not know that
    return tabulate(output, headers=headers,
                    floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")


def text_table_periodic_breakdown(days_breakdown_stats: List[Dict[str, Any]],
                                  stake_currency: str, period: str) -> str:
    """
    Generate small table with Backtest results by days
    :param days_breakdown_stats: Days breakdown metrics
    :param stake_currency: Stakecurrency used
    :return: pretty printed table with tabulate as string
    """
    headers = [
        period.capitalize(),
        f'Tot Profit {stake_currency}',
        'Wins',
        'Draws',
        'Losses',
    ]
    output = [[
        d['date'], round_coin_value(d['profit_abs'], stake_currency, False),
        d['wins'], d['draws'], d['loses'],
    ] for d in days_breakdown_stats]
    return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")


def text_table_strategy(strategy_results, stake_currency: str) -> str:
    """
    Generate summary table per strategy
    :param strategy_results: Dict of <Strategyname: DataFrame> containing results for all strategies
    :param stake_currency: stake-currency - used to correctly name headers
    :return: pretty printed table with tabulate as string
    """
    floatfmt = _get_line_floatfmt(stake_currency)
    headers = _get_line_header('Strategy', stake_currency)
    # _get_line_header() is also used for per-pair summary. Per-pair drawdown is mostly useless
    # therefore we slip this column in only for strategy summary here.
    headers.append('Drawdown')

    # Align drawdown string on the center two space separator.
    if 'max_drawdown_account' in strategy_results[0]:
        drawdown = [f'{t["max_drawdown_account"] * 100:.2f}' for t in strategy_results]
    else:
        # Support for prior backtest results
        drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results]

    dd_pad_abs = max([len(t['max_drawdown_abs']) for t in strategy_results])
    dd_pad_per = max([len(dd) for dd in drawdown])
    drawdown = [f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%'
                for t, dd in zip(strategy_results, drawdown)]

    output = [[
        t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
        t['profit_total_pct'], t['duration_avg'],
        generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), drawdown]
        for t, drawdown in zip(strategy_results, drawdown)]
    # Ignore type as floatfmt does allow tuples but mypy does not know that
    return tabulate(output, headers=headers,
                    floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")


def text_table_add_metrics(strat_results: Dict) -> str:
    if len(strat_results['trades']) > 0:
        best_trade = max(strat_results['trades'], key=lambda x: x['profit_ratio'])
        worst_trade = min(strat_results['trades'], key=lambda x: x['profit_ratio'])

        short_metrics = [
            ('', ''),  # Empty line to improve readability
            ('Long / Short',
             f"{strat_results.get('trade_count_long', 'total_trades')} / "
             f"{strat_results.get('trade_count_short', 0)}"),
            ('Total profit Long %', f"{strat_results['profit_total_long']:.2%}"),
            ('Total profit Short %', f"{strat_results['profit_total_short']:.2%}"),
            ('Absolute profit Long', round_coin_value(strat_results['profit_total_long_abs'],
                                                      strat_results['stake_currency'])),
            ('Absolute profit Short', round_coin_value(strat_results['profit_total_short_abs'],
                                                       strat_results['stake_currency'])),
        ] if strat_results.get('trade_count_short', 0) > 0 else []

        drawdown_metrics = []
        if 'max_relative_drawdown' in strat_results:
            # Compatibility to show old hyperopt results
            drawdown_metrics.append(
                ('Max % of account underwater', f"{strat_results['max_relative_drawdown']:.2%}")
            )
        drawdown_metrics.extend([
            ('Absolute Drawdown (Account)', f"{strat_results['max_drawdown_account']:.2%}")
            if 'max_drawdown_account' in strat_results else (
                'Drawdown', f"{strat_results['max_drawdown']:.2%}"),
            ('Absolute Drawdown', round_coin_value(strat_results['max_drawdown_abs'],
                                                   strat_results['stake_currency'])),
            ('Drawdown high', round_coin_value(strat_results['max_drawdown_high'],
                                               strat_results['stake_currency'])),
            ('Drawdown low', round_coin_value(strat_results['max_drawdown_low'],
                                              strat_results['stake_currency'])),
            ('Drawdown Start', strat_results['drawdown_start']),
            ('Drawdown End', strat_results['drawdown_end']),
        ])

        entry_adjustment_metrics = [
            ('Canceled Trade Entries', strat_results.get('canceled_trade_entries', 'N/A')),
            ('Canceled Entry Orders', strat_results.get('canceled_entry_orders', 'N/A')),
            ('Replaced Entry Orders', strat_results.get('replaced_entry_orders', 'N/A')),
        ] if strat_results.get('canceled_entry_orders', 0) > 0 else []

        # Newly added fields should be ignored if they are missing in strat_results. hyperopt-show
        # command stores these results and newer version of freqtrade must be able to handle old
        # results with missing new fields.
        metrics = [
            ('Backtesting from', strat_results['backtest_start']),
            ('Backtesting to', strat_results['backtest_end']),
            ('Max open trades', strat_results['max_open_trades']),
            ('', ''),  # Empty line to improve readability
            ('Total/Daily Avg Trades',
             f"{strat_results['total_trades']} / {strat_results['trades_per_day']}"),

            ('Starting balance', round_coin_value(strat_results['starting_balance'],
                                                  strat_results['stake_currency'])),
            ('Final balance', round_coin_value(strat_results['final_balance'],
                                               strat_results['stake_currency'])),
            ('Absolute profit ', round_coin_value(strat_results['profit_total_abs'],
                                                  strat_results['stake_currency'])),
            ('Total profit %', f"{strat_results['profit_total']:.2%}"),
            ('CAGR %', f"{strat_results['cagr']:.2%}" if 'cagr' in strat_results else 'N/A'),
            ('Sortino', f"{strat_results['sortino']:.2f}" if 'sortino' in strat_results else 'N/A'),
            ('Sharpe', f"{strat_results['sharpe']:.2f}" if 'sharpe' in strat_results else 'N/A'),
            ('Calmar', f"{strat_results['calmar']:.2f}" if 'calmar' in strat_results else 'N/A'),
            ('Profit factor', f'{strat_results["profit_factor"]:.2f}' if 'profit_factor'
                in strat_results else 'N/A'),
            ('Expectancy', f"{strat_results['expectancy']:.2f}" if 'expectancy'
                in strat_results else 'N/A'),
            ('Trades per day', strat_results['trades_per_day']),
            ('Avg. daily profit %',
             f"{(strat_results['profit_total'] / strat_results['backtest_days']):.2%}"),
            ('Avg. stake amount', round_coin_value(strat_results['avg_stake_amount'],
                                                   strat_results['stake_currency'])),
            ('Total trade volume', round_coin_value(strat_results['total_volume'],
                                                    strat_results['stake_currency'])),
            *short_metrics,
            ('', ''),  # Empty line to improve readability
            ('Best Pair', f"{strat_results['best_pair']['key']} "
                          f"{strat_results['best_pair']['profit_sum']:.2%}"),
            ('Worst Pair', f"{strat_results['worst_pair']['key']} "
                           f"{strat_results['worst_pair']['profit_sum']:.2%}"),
            ('Best trade', f"{best_trade['pair']} {best_trade['profit_ratio']:.2%}"),
            ('Worst trade', f"{worst_trade['pair']} "
                            f"{worst_trade['profit_ratio']:.2%}"),

            ('Best day', round_coin_value(strat_results['backtest_best_day_abs'],
                                          strat_results['stake_currency'])),
            ('Worst day', round_coin_value(strat_results['backtest_worst_day_abs'],
                                           strat_results['stake_currency'])),
            ('Days win/draw/lose', f"{strat_results['winning_days']} / "
                                   f"{strat_results['draw_days']} / {strat_results['losing_days']}"),
            ('Avg. Duration Winners', f"{strat_results['winner_holding_avg']}"),
            ('Avg. Duration Loser', f"{strat_results['loser_holding_avg']}"),
            ('Rejected Entry signals', strat_results.get('rejected_signals', 'N/A')),
            ('Entry/Exit Timeouts',
             f"{strat_results.get('timedout_entry_orders', 'N/A')} / "
             f"{strat_results.get('timedout_exit_orders', 'N/A')}"),
            *entry_adjustment_metrics,
            ('', ''),  # Empty line to improve readability

            ('Min balance', round_coin_value(strat_results['csum_min'],
                                             strat_results['stake_currency'])),
            ('Max balance', round_coin_value(strat_results['csum_max'],
                                             strat_results['stake_currency'])),

            *drawdown_metrics,
            ('Market change', f"{strat_results['market_change']:.2%}"),
        ]

        return tabulate(metrics, headers=["Metric", "Value"], tablefmt="orgtbl")
    else:
        start_balance = round_coin_value(strat_results['starting_balance'],
                                         strat_results['stake_currency'])
        stake_amount = round_coin_value(
            strat_results['stake_amount'], strat_results['stake_currency']
        ) if strat_results['stake_amount'] != UNLIMITED_STAKE_AMOUNT else 'unlimited'

        message = ("No trades made. "
                   f"Your starting balance was {start_balance}, "
                   f"and your stake was {stake_amount}."
                   )
        return message


def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency: str,
                         backtest_breakdown=[]):
    """
    Print results for one strategy
    """
    # Print results
    print(f"Result for strategy {strategy}")
    table = text_table_bt_results(results['results_per_pair'], stake_currency=stake_currency)
    if isinstance(table, str):
        print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '='))
    print(table)

    table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency)
    if isinstance(table, str) and len(table) > 0:
        print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '='))
    print(table)

    if (results.get('results_per_enter_tag') is not None
            or results.get('results_per_buy_tag') is not None):
        # results_per_buy_tag is deprecated and should be removed 2 versions after short golive.
        table = text_table_tags(
            "enter_tag",
            results.get('results_per_enter_tag', results.get('results_per_buy_tag')),
            stake_currency=stake_currency)

        if isinstance(table, str) and len(table) > 0:
            print(' ENTER TAG STATS '.center(len(table.splitlines()[0]), '='))
        print(table)

    exit_reasons = results.get('exit_reason_summary', results.get('sell_reason_summary'))
    table = text_table_exit_reason(exit_reason_stats=exit_reasons,
                                   stake_currency=stake_currency)
    if isinstance(table, str) and len(table) > 0:
        print(' EXIT REASON STATS '.center(len(table.splitlines()[0]), '='))
    print(table)

    for period in backtest_breakdown:
        if period in results.get('periodic_breakdown', {}):
            days_breakdown_stats = results['periodic_breakdown'][period]
        else:
            days_breakdown_stats = generate_periodic_breakdown_stats(
                trade_list=results['trades'], period=period)
        table = text_table_periodic_breakdown(days_breakdown_stats=days_breakdown_stats,
                                              stake_currency=stake_currency, period=period)
        if isinstance(table, str) and len(table) > 0:
            print(f' {period.upper()} BREAKDOWN '.center(len(table.splitlines()[0]), '='))
        print(table)

    table = text_table_add_metrics(results)
    if isinstance(table, str) and len(table) > 0:
        print(' SUMMARY METRICS '.center(len(table.splitlines()[0]), '='))
    print(table)

    if isinstance(table, str) and len(table) > 0:
        print('=' * len(table.splitlines()[0]))

    print()


def show_backtest_results(config: Config, backtest_stats: Dict):
    stake_currency = config['stake_currency']

    for strategy, results in backtest_stats['strategy'].items():
        show_backtest_result(
            strategy, results, stake_currency,
            config.get('backtest_breakdown', []))

    if len(backtest_stats['strategy']) > 0:
        # Print Strategy summary table

        table = text_table_strategy(backtest_stats['strategy_comparison'], stake_currency)
        print(f"Backtested {results['backtest_start']} -> {results['backtest_end']} |"
              f" Max open trades : {results['max_open_trades']}")
        print(' STRATEGY SUMMARY '.center(len(table.splitlines()[0]), '='))
        print(table)
        print('=' * len(table.splitlines()[0]))
        print('\nFor more details, please look at the detail tables above')


def show_sorted_pairlist(config: Config, backtest_stats: Dict):
    if config.get('backtest_show_pair_list', False):
        for strategy, results in backtest_stats['strategy'].items():
            print(f"Pairs for Strategy {strategy}: \n[")
            for result in results['results_per_pair']:
                if result["key"] != 'TOTAL':
                    print(f'"{result["key"]}", // {result["profit_mean"]:.2%}')
            print("]")


def generate_edge_table(results: dict) -> str:
    floatfmt = ('s', '.10g', '.2f', '.2f', '.2f', '.2f', 'd', 'd', 'd')
    tabular_data = []
    headers = ['Pair', 'Stoploss', 'Win Rate', 'Risk Reward Ratio',
               'Required Risk Reward', 'Expectancy', 'Total Number of Trades',
               'Average Duration (min)']

    for result in results.items():
        if result[1].nb_trades > 0:
            tabular_data.append([
                result[0],
                result[1].stoploss,
                result[1].winrate,
                result[1].risk_reward_ratio,
                result[1].required_risk_reward,
                result[1].expectancy,
                result[1].nb_trades,
                round(result[1].avg_trade_duration)
            ])

    # Ignore type as floatfmt does allow tuples but mypy does not know that
    return tabulate(tabular_data, headers=headers,
                    floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
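A quick usage sketch for the table builders (field names follow the list comprehension in `text_table_bt_results` above; the values are invented):

```python
pair_results = [{
    'key': 'ETH/USDT', 'trades': 10, 'profit_mean_pct': 1.2,
    'profit_sum_pct': 12.0, 'profit_total_abs': 120.0,
    'profit_total_pct': 1.2, 'duration_avg': '2:00:00',
    'wins': 6, 'draws': 1, 'losses': 3,
}]
print(text_table_bt_results(pair_results, stake_currency='USDT'))
```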
freqtrade/optimize/optimize_reports/bt_storage.py (new file, 71 lines)

@@ -0,0 +1,71 @@
import logging
from pathlib import Path
from typing import Dict

from pandas import DataFrame

from freqtrade.constants import LAST_BT_RESULT_FN
from freqtrade.misc import file_dump_joblib, file_dump_json
from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename


logger = logging.getLogger(__name__)


def store_backtest_stats(
        recordfilename: Path, stats: Dict[str, DataFrame], dtappendix: str) -> None:
    """
    Stores backtest results
    :param recordfilename: Path object, which can either be a filename or a directory.
        Filenames will be appended with a timestamp right before the suffix
        while for directories, <directory>/backtest-result-<datetime>.json will be used as filename
    :param stats: Dataframe containing the backtesting statistics
    :param dtappendix: Datetime to use for the filename
    """
    if recordfilename.is_dir():
        filename = (recordfilename / f'backtest-result-{dtappendix}.json')
    else:
        filename = Path.joinpath(
            recordfilename.parent, f'{recordfilename.stem}-{dtappendix}'
        ).with_suffix(recordfilename.suffix)

    # Store metadata separately.
    file_dump_json(get_backtest_metadata_filename(filename), stats['metadata'])
    del stats['metadata']

    file_dump_json(filename, stats)

    latest_filename = Path.joinpath(filename.parent, LAST_BT_RESULT_FN)
    file_dump_json(latest_filename, {'latest_backtest': str(filename.name)})


def _store_backtest_analysis_data(
        recordfilename: Path, data: Dict[str, Dict],
        dtappendix: str, name: str) -> Path:
    """
    Stores backtest trade candles for analysis
    :param recordfilename: Path object, which can either be a filename or a directory.
        Filenames will be appended with a timestamp right before the suffix
        while for directories, <directory>/backtest-result-<datetime>_<name>.pkl will be used
        as filename
    :param candles: Dict containing the backtesting data for analysis
    :param dtappendix: Datetime to use for the filename
    :param name: Name to use for the file, e.g. signals, rejected
    """
    if recordfilename.is_dir():
        filename = (recordfilename / f'backtest-result-{dtappendix}_{name}.pkl')
    else:
        filename = Path.joinpath(
            recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_{name}.pkl'
        )

    file_dump_joblib(filename, data)

    return filename


def store_backtest_analysis_results(
        recordfilename: Path, candles: Dict[str, Dict], trades: Dict[str, Dict],
        dtappendix: str) -> None:
    _store_backtest_analysis_data(recordfilename, candles, dtappendix, "signals")
    _store_backtest_analysis_data(recordfilename, trades, dtappendix, "rejected")
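A hedged sketch of the two filename behaviours described in `store_backtest_stats`'s docstring (the path and stats content are illustrative):

```python
from pathlib import Path

stats = {'metadata': {}, 'strategy': {}, 'strategy_comparison': []}

# Directory target -> <dir>/backtest-result-<dtappendix>.json;
# a file target would splice the timestamp in before the suffix instead.
store_backtest_stats(Path('user_data/backtest_results'), stats,
                     dtappendix='2023-07-01_12-00-00')
```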
@ -1,83 +1,20 @@
|
|||
import logging
|
||||
from copy import deepcopy
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Union
|
||||
|
||||
from pandas import DataFrame, concat, to_datetime
|
||||
from tabulate import tabulate
|
||||
|
||||
from freqtrade.constants import (BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT, LAST_BT_RESULT_FN,
|
||||
UNLIMITED_STAKE_AMOUNT, Config, IntOrInf)
|
||||
from freqtrade.constants import BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT, IntOrInf
|
||||
from freqtrade.data.metrics import (calculate_cagr, calculate_calmar, calculate_csum,
|
||||
calculate_expectancy, calculate_market_change,
|
||||
calculate_max_drawdown, calculate_sharpe, calculate_sortino)
|
||||
from freqtrade.misc import decimals_per_coin, file_dump_joblib, file_dump_json, round_coin_value
|
||||
from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename
|
||||
from freqtrade.misc import decimals_per_coin, round_coin_value
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def store_backtest_stats(
|
||||
recordfilename: Path, stats: Dict[str, DataFrame], dtappendix: str) -> None:
|
||||
"""
|
||||
Stores backtest results
|
||||
:param recordfilename: Path object, which can either be a filename or a directory.
|
||||
Filenames will be appended with a timestamp right before the suffix
|
||||
while for directories, <directory>/backtest-result-<datetime>.json will be used as filename
|
||||
:param stats: Dataframe containing the backtesting statistics
|
||||
:param dtappendix: Datetime to use for the filename
|
||||
"""
|
||||
if recordfilename.is_dir():
|
||||
filename = (recordfilename / f'backtest-result-{dtappendix}.json')
|
||||
else:
|
||||
filename = Path.joinpath(
|
||||
recordfilename.parent, f'{recordfilename.stem}-{dtappendix}'
|
||||
).with_suffix(recordfilename.suffix)
|
||||
|
||||
# Store metadata separately.
|
||||
file_dump_json(get_backtest_metadata_filename(filename), stats['metadata'])
|
||||
del stats['metadata']
|
||||
|
||||
file_dump_json(filename, stats)
|
||||
|
||||
latest_filename = Path.joinpath(filename.parent, LAST_BT_RESULT_FN)
|
||||
file_dump_json(latest_filename, {'latest_backtest': str(filename.name)})
|
||||
|
||||
|
||||
def _store_backtest_analysis_data(
|
||||
recordfilename: Path, data: Dict[str, Dict],
|
||||
dtappendix: str, name: str) -> Path:
|
||||
"""
|
||||
Stores backtest trade candles for analysis
|
||||
:param recordfilename: Path object, which can either be a filename or a directory.
|
||||
Filenames will be appended with a timestamp right before the suffix
|
||||
while for directories, <directory>/backtest-result-<datetime>_<name>.pkl will be used
|
||||
as filename
|
||||
:param candles: Dict containing the backtesting data for analysis
|
||||
:param dtappendix: Datetime to use for the filename
|
||||
:param name: Name to use for the file, e.g. signals, rejected
|
||||
"""
|
||||
if recordfilename.is_dir():
|
||||
filename = (recordfilename / f'backtest-result-{dtappendix}_{name}.pkl')
|
||||
else:
|
||||
filename = Path.joinpath(
|
||||
recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_{name}.pkl'
|
||||
)
|
||||
|
||||
file_dump_joblib(filename, data)
|
||||
|
||||
return filename
|
||||
|
||||
|
||||
def store_backtest_analysis_results(
|
||||
recordfilename: Path, candles: Dict[str, Dict], trades: Dict[str, Dict],
|
||||
dtappendix: str) -> None:
|
||||
_store_backtest_analysis_data(recordfilename, candles, dtappendix, "signals")
|
||||
_store_backtest_analysis_data(recordfilename, trades, dtappendix, "rejected")
|
||||
|
||||
|
||||
def generate_trade_signal_candles(preprocessed_df: Dict[str, DataFrame],
|
||||
bt_results: Dict[str, Any]) -> DataFrame:
|
||||
signal_candles_only = {}
|
||||
|
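The storage helpers removed above share one naming convention worth noting: `Path.joinpath` plus `with_suffix` splices a timestamp in front of the file extension. A minimal, runnable sketch of just that logic (the helper name `timestamped` is illustrative, not part of freqtrade):

``` python
from pathlib import Path

def timestamped(record: Path, dtappendix: str) -> Path:
    # Directories get a generated filename; files get the timestamp
    # inserted right before their suffix, as in store_backtest_stats().
    if record.is_dir():
        return record / f"backtest-result-{dtappendix}.json"
    return record.parent.joinpath(f"{record.stem}-{dtappendix}").with_suffix(record.suffix)

print(timestamped(Path("user_data/backtest_results/result.json"), "2023_07_01_12_00_00"))
# user_data/backtest_results/result-2023_07_01_12_00_00.json
```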
@ -120,24 +57,6 @@ def generate_rejected_signals(preprocessed_df: Dict[str, DataFrame],
    return rejected_candles_only


def _get_line_floatfmt(stake_currency: str) -> List[str]:
    """
    Generate floatformat (goes in line with _generate_result_line())
    """
    return ['s', 'd', '.2f', '.2f', f'.{decimals_per_coin(stake_currency)}f',
            '.2f', 'd', 's', 's']


def _get_line_header(first_column: str, stake_currency: str,
                     direction: str = 'Entries') -> List[str]:
    """
    Generate header lines (goes in line with _generate_result_line())
    """
    return [first_column, direction, 'Avg Profit %', 'Cum Profit %',
            f'Tot Profit {stake_currency}', 'Tot Profit %', 'Avg Duration',
            'Win Draw Loss Win%']


def generate_wins_draws_losses(wins, draws, losses):
    if wins > 0 and losses == 0:
        wl_ratio = '100'

@ -295,31 +214,6 @@ def generate_strategy_comparison(bt_stats: Dict) -> List[Dict]:
    return tabular_data


def generate_edge_table(results: dict) -> str:
    floatfmt = ('s', '.10g', '.2f', '.2f', '.2f', '.2f', 'd', 'd', 'd')
    tabular_data = []
    headers = ['Pair', 'Stoploss', 'Win Rate', 'Risk Reward Ratio',
               'Required Risk Reward', 'Expectancy', 'Total Number of Trades',
               'Average Duration (min)']

    for result in results.items():
        if result[1].nb_trades > 0:
            tabular_data.append([
                result[0],
                result[1].stoploss,
                result[1].winrate,
                result[1].risk_reward_ratio,
                result[1].required_risk_reward,
                result[1].expectancy,
                result[1].nb_trades,
                round(result[1].avg_trade_duration)
            ])

    # Ignore type as floatfmt does allow tuples but mypy does not know that
    return tabulate(tabular_data, headers=headers,
                    floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")


def _get_resample_from_period(period: str) -> str:
    if period == 'day':
        return '1d'
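The `# Ignore type as floatfmt does allow tuples` comments in the table builders above refer to a tabulate feature mypy's stubs don't model: `floatfmt` may be a per-column tuple. A quick standalone demonstration:

``` python
from tabulate import tabulate

rows = [["XRP/USDT", -0.02, 0.5501, 1.2345]]
headers = ["Pair", "Stoploss", "Win Rate", "Risk Reward Ratio"]
# One format per column: 's' leaves strings alone, '.2f' rounds floats.
print(tabulate(rows, headers=headers,
               floatfmt=("s", ".10g", ".2f", ".2f"), tablefmt="orgtbl"))
```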
@ -652,357 +546,3 @@ def generate_backtest_stats(btdata: Dict[str, DataFrame],
    result['strategy_comparison'] = strategy_results

    return result


###
# Start output section
###

def text_table_bt_results(pair_results: List[Dict[str, Any]], stake_currency: str) -> str:
    """
    Generates and returns a text table for the given backtest data and the results dataframe
    :param pair_results: List of Dictionaries - one entry per pair + final TOTAL row
    :param stake_currency: stake-currency - used to correctly name headers
    :return: pretty printed table with tabulate as string
    """

    headers = _get_line_header('Pair', stake_currency)
    floatfmt = _get_line_floatfmt(stake_currency)
    output = [[
        t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
        t['profit_total_pct'], t['duration_avg'],
        generate_wins_draws_losses(t['wins'], t['draws'], t['losses'])
    ] for t in pair_results]
    # Ignore type as floatfmt does allow tuples but mypy does not know that
    return tabulate(output, headers=headers,
                    floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")


def text_table_exit_reason(exit_reason_stats: List[Dict[str, Any]], stake_currency: str) -> str:
    """
    Generate small table outlining Backtest results
    :param sell_reason_stats: Exit reason metrics
    :param stake_currency: Stakecurrency used
    :return: pretty printed table with tabulate as string
    """
    headers = [
        'Exit Reason',
        'Exits',
        'Win  Draws  Loss  Win%',
        'Avg Profit %',
        'Cum Profit %',
        f'Tot Profit {stake_currency}',
        'Tot Profit %',
    ]

    output = [[
        t.get('exit_reason', t.get('sell_reason')), t['trades'],
        generate_wins_draws_losses(t['wins'], t['draws'], t['losses']),
        t['profit_mean_pct'], t['profit_sum_pct'],
        round_coin_value(t['profit_total_abs'], stake_currency, False),
        t['profit_total_pct'],
    ] for t in exit_reason_stats]
    return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")


def text_table_tags(tag_type: str, tag_results: List[Dict[str, Any]], stake_currency: str) -> str:
    """
    Generates and returns a text table for the given backtest data and the results dataframe
    :param pair_results: List of Dictionaries - one entry per pair + final TOTAL row
    :param stake_currency: stake-currency - used to correctly name headers
    :return: pretty printed table with tabulate as string
    """
    if (tag_type == "enter_tag"):
        headers = _get_line_header("TAG", stake_currency)
    else:
        headers = _get_line_header("TAG", stake_currency, 'Exits')
    floatfmt = _get_line_floatfmt(stake_currency)
    output = [
        [
            t['key'] if t['key'] is not None and len(
                t['key']) > 0 else "OTHER",
            t['trades'],
            t['profit_mean_pct'],
            t['profit_sum_pct'],
            t['profit_total_abs'],
            t['profit_total_pct'],
            t['duration_avg'],
            generate_wins_draws_losses(
                t['wins'],
                t['draws'],
                t['losses'])] for t in tag_results]
    # Ignore type as floatfmt does allow tuples but mypy does not know that
    return tabulate(output, headers=headers,
                    floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")


def text_table_periodic_breakdown(days_breakdown_stats: List[Dict[str, Any]],
                                  stake_currency: str, period: str) -> str:
    """
    Generate small table with Backtest results by days
    :param days_breakdown_stats: Days breakdown metrics
    :param stake_currency: Stakecurrency used
    :return: pretty printed table with tabulate as string
    """
    headers = [
        period.capitalize(),
        f'Tot Profit {stake_currency}',
        'Wins',
        'Draws',
        'Losses',
    ]
    output = [[
        d['date'], round_coin_value(d['profit_abs'], stake_currency, False),
        d['wins'], d['draws'], d['loses'],
    ] for d in days_breakdown_stats]
    return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")


def text_table_strategy(strategy_results, stake_currency: str) -> str:
    """
    Generate summary table per strategy
    :param strategy_results: Dict of <Strategyname: DataFrame> containing results for all strategies
    :param stake_currency: stake-currency - used to correctly name headers
    :return: pretty printed table with tabulate as string
    """
    floatfmt = _get_line_floatfmt(stake_currency)
    headers = _get_line_header('Strategy', stake_currency)
    # _get_line_header() is also used for per-pair summary. Per-pair drawdown is mostly useless
    # therefore we slip this column in only for strategy summary here.
    headers.append('Drawdown')

    # Align drawdown string on the center two space separator.
    if 'max_drawdown_account' in strategy_results[0]:
        drawdown = [f'{t["max_drawdown_account"] * 100:.2f}' for t in strategy_results]
    else:
        # Support for prior backtest results
        drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results]

    dd_pad_abs = max([len(t['max_drawdown_abs']) for t in strategy_results])
    dd_pad_per = max([len(dd) for dd in drawdown])
    drawdown = [f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency}  {dd:>{dd_pad_per}}%'
                for t, dd in zip(strategy_results, drawdown)]

    output = [[
        t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
        t['profit_total_pct'], t['duration_avg'],
        generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), drawdown]
        for t, drawdown in zip(strategy_results, drawdown)]
    # Ignore type as floatfmt does allow tuples but mypy does not know that
    return tabulate(output, headers=headers,
                    floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
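The drawdown column built above right-aligns the absolute and percent parts on a shared width using nested format specifications. The same alignment trick in isolation (the values here are made up):

``` python
drawdowns = [("12.5", "3.14"), ("200.0", "12.0")]
pad_abs = max(len(abs_dd) for abs_dd, _ in drawdowns)
pad_pct = max(len(pct) for _, pct in drawdowns)
for abs_dd, pct in drawdowns:
    # {value:>{width}} - the width itself is a nested format field
    print(f"{abs_dd:>{pad_abs}} USDT {pct:>{pad_pct}}%")
#  12.5 USDT 3.14%
# 200.0 USDT 12.0%
```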
def text_table_add_metrics(strat_results: Dict) -> str:
    if len(strat_results['trades']) > 0:
        best_trade = max(strat_results['trades'], key=lambda x: x['profit_ratio'])
        worst_trade = min(strat_results['trades'], key=lambda x: x['profit_ratio'])

        short_metrics = [
            ('', ''),  # Empty line to improve readability
            ('Long / Short',
             f"{strat_results.get('trade_count_long', 'total_trades')} / "
             f"{strat_results.get('trade_count_short', 0)}"),
            ('Total profit Long %', f"{strat_results['profit_total_long']:.2%}"),
            ('Total profit Short %', f"{strat_results['profit_total_short']:.2%}"),
            ('Absolute profit Long', round_coin_value(strat_results['profit_total_long_abs'],
                                                      strat_results['stake_currency'])),
            ('Absolute profit Short', round_coin_value(strat_results['profit_total_short_abs'],
                                                       strat_results['stake_currency'])),
        ] if strat_results.get('trade_count_short', 0) > 0 else []

        drawdown_metrics = []
        if 'max_relative_drawdown' in strat_results:
            # Compatibility to show old hyperopt results
            drawdown_metrics.append(
                ('Max % of account underwater', f"{strat_results['max_relative_drawdown']:.2%}")
            )
        drawdown_metrics.extend([
            ('Absolute Drawdown (Account)', f"{strat_results['max_drawdown_account']:.2%}")
            if 'max_drawdown_account' in strat_results else (
                'Drawdown', f"{strat_results['max_drawdown']:.2%}"),
            ('Absolute Drawdown', round_coin_value(strat_results['max_drawdown_abs'],
                                                   strat_results['stake_currency'])),
            ('Drawdown high', round_coin_value(strat_results['max_drawdown_high'],
                                               strat_results['stake_currency'])),
            ('Drawdown low', round_coin_value(strat_results['max_drawdown_low'],
                                              strat_results['stake_currency'])),
            ('Drawdown Start', strat_results['drawdown_start']),
            ('Drawdown End', strat_results['drawdown_end']),
        ])

        entry_adjustment_metrics = [
            ('Canceled Trade Entries', strat_results.get('canceled_trade_entries', 'N/A')),
            ('Canceled Entry Orders', strat_results.get('canceled_entry_orders', 'N/A')),
            ('Replaced Entry Orders', strat_results.get('replaced_entry_orders', 'N/A')),
        ] if strat_results.get('canceled_entry_orders', 0) > 0 else []

        # Newly added fields should be ignored if they are missing in strat_results. hyperopt-show
        # command stores these results and newer version of freqtrade must be able to handle old
        # results with missing new fields.
        metrics = [
            ('Backtesting from', strat_results['backtest_start']),
            ('Backtesting to', strat_results['backtest_end']),
            ('Max open trades', strat_results['max_open_trades']),
            ('', ''),  # Empty line to improve readability
            ('Total/Daily Avg Trades',
             f"{strat_results['total_trades']} / {strat_results['trades_per_day']}"),

            ('Starting balance', round_coin_value(strat_results['starting_balance'],
                                                  strat_results['stake_currency'])),
            ('Final balance', round_coin_value(strat_results['final_balance'],
                                               strat_results['stake_currency'])),
            ('Absolute profit ', round_coin_value(strat_results['profit_total_abs'],
                                                  strat_results['stake_currency'])),
            ('Total profit %', f"{strat_results['profit_total']:.2%}"),
            ('CAGR %', f"{strat_results['cagr']:.2%}" if 'cagr' in strat_results else 'N/A'),
            ('Sortino', f"{strat_results['sortino']:.2f}" if 'sortino' in strat_results else 'N/A'),
            ('Sharpe', f"{strat_results['sharpe']:.2f}" if 'sharpe' in strat_results else 'N/A'),
            ('Calmar', f"{strat_results['calmar']:.2f}" if 'calmar' in strat_results else 'N/A'),
            ('Profit factor', f'{strat_results["profit_factor"]:.2f}' if 'profit_factor'
             in strat_results else 'N/A'),
            ('Expectancy', f"{strat_results['expectancy']:.2f}" if 'expectancy'
             in strat_results else 'N/A'),
            ('Trades per day', strat_results['trades_per_day']),
            ('Avg. daily profit %',
             f"{(strat_results['profit_total'] / strat_results['backtest_days']):.2%}"),
            ('Avg. stake amount', round_coin_value(strat_results['avg_stake_amount'],
                                                   strat_results['stake_currency'])),
            ('Total trade volume', round_coin_value(strat_results['total_volume'],
                                                    strat_results['stake_currency'])),
            *short_metrics,
            ('', ''),  # Empty line to improve readability
            ('Best Pair', f"{strat_results['best_pair']['key']} "
                          f"{strat_results['best_pair']['profit_sum']:.2%}"),
            ('Worst Pair', f"{strat_results['worst_pair']['key']} "
                           f"{strat_results['worst_pair']['profit_sum']:.2%}"),
            ('Best trade', f"{best_trade['pair']} {best_trade['profit_ratio']:.2%}"),
            ('Worst trade', f"{worst_trade['pair']} "
                            f"{worst_trade['profit_ratio']:.2%}"),

            ('Best day', round_coin_value(strat_results['backtest_best_day_abs'],
                                          strat_results['stake_currency'])),
            ('Worst day', round_coin_value(strat_results['backtest_worst_day_abs'],
                                           strat_results['stake_currency'])),
            ('Days win/draw/lose', f"{strat_results['winning_days']} / "
                                   f"{strat_results['draw_days']} / {strat_results['losing_days']}"),
            ('Avg. Duration Winners', f"{strat_results['winner_holding_avg']}"),
            ('Avg. Duration Loser', f"{strat_results['loser_holding_avg']}"),
            ('Rejected Entry signals', strat_results.get('rejected_signals', 'N/A')),
            ('Entry/Exit Timeouts',
             f"{strat_results.get('timedout_entry_orders', 'N/A')} / "
             f"{strat_results.get('timedout_exit_orders', 'N/A')}"),
            *entry_adjustment_metrics,
            ('', ''),  # Empty line to improve readability

            ('Min balance', round_coin_value(strat_results['csum_min'],
                                             strat_results['stake_currency'])),
            ('Max balance', round_coin_value(strat_results['csum_max'],
                                             strat_results['stake_currency'])),

            *drawdown_metrics,
            ('Market change', f"{strat_results['market_change']:.2%}"),
        ]

        return tabulate(metrics, headers=["Metric", "Value"], tablefmt="orgtbl")
    else:
        start_balance = round_coin_value(strat_results['starting_balance'],
                                         strat_results['stake_currency'])
        stake_amount = round_coin_value(
            strat_results['stake_amount'], strat_results['stake_currency']
        ) if strat_results['stake_amount'] != UNLIMITED_STAKE_AMOUNT else 'unlimited'

        message = ("No trades made. "
                   f"Your starting balance was {start_balance}, "
                   f"and your stake was {stake_amount}."
                   )
        return message


def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency: str,
                         backtest_breakdown=[]):
    """
    Print results for one strategy
    """
    # Print results
    print(f"Result for strategy {strategy}")
    table = text_table_bt_results(results['results_per_pair'], stake_currency=stake_currency)
    if isinstance(table, str):
        print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '='))
        print(table)

    table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency)
    if isinstance(table, str) and len(table) > 0:
        print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '='))
        print(table)

    if (results.get('results_per_enter_tag') is not None
            or results.get('results_per_buy_tag') is not None):
        # results_per_buy_tag is deprecated and should be removed 2 versions after short golive.
        table = text_table_tags(
            "enter_tag",
            results.get('results_per_enter_tag', results.get('results_per_buy_tag')),
            stake_currency=stake_currency)

        if isinstance(table, str) and len(table) > 0:
            print(' ENTER TAG STATS '.center(len(table.splitlines()[0]), '='))
            print(table)

    exit_reasons = results.get('exit_reason_summary', results.get('sell_reason_summary'))
    table = text_table_exit_reason(exit_reason_stats=exit_reasons,
                                   stake_currency=stake_currency)
    if isinstance(table, str) and len(table) > 0:
        print(' EXIT REASON STATS '.center(len(table.splitlines()[0]), '='))
        print(table)

    for period in backtest_breakdown:
        if period in results.get('periodic_breakdown', {}):
            days_breakdown_stats = results['periodic_breakdown'][period]
        else:
            days_breakdown_stats = generate_periodic_breakdown_stats(
                trade_list=results['trades'], period=period)
        table = text_table_periodic_breakdown(days_breakdown_stats=days_breakdown_stats,
                                              stake_currency=stake_currency, period=period)
        if isinstance(table, str) and len(table) > 0:
            print(f' {period.upper()} BREAKDOWN '.center(len(table.splitlines()[0]), '='))
            print(table)

    table = text_table_add_metrics(results)
    if isinstance(table, str) and len(table) > 0:
        print(' SUMMARY METRICS '.center(len(table.splitlines()[0]), '='))
        print(table)

    if isinstance(table, str) and len(table) > 0:
        print('=' * len(table.splitlines()[0]))

    print()


def show_backtest_results(config: Config, backtest_stats: Dict):
    stake_currency = config['stake_currency']

    for strategy, results in backtest_stats['strategy'].items():
        show_backtest_result(
            strategy, results, stake_currency,
            config.get('backtest_breakdown', []))

    if len(backtest_stats['strategy']) > 0:
        # Print Strategy summary table

        table = text_table_strategy(backtest_stats['strategy_comparison'], stake_currency)
        print(f"Backtested {results['backtest_start']} -> {results['backtest_end']} |"
              f" Max open trades : {results['max_open_trades']}")
        print(' STRATEGY SUMMARY '.center(len(table.splitlines()[0]), '='))
        print(table)
        print('=' * len(table.splitlines()[0]))
        print('\nFor more details, please look at the detail tables above')


def show_sorted_pairlist(config: Config, backtest_stats: Dict):
    if config.get('backtest_show_pair_list', False):
        for strategy, results in backtest_stats['strategy'].items():
            print(f"Pairs for Strategy {strategy}: \n[")
            for result in results['results_per_pair']:
                if result["key"] != 'TOTAL':
                    print(f'"{result["key"]}",  // {result["profit_mean"]:.2%}')
            print("]")
@ -42,7 +42,7 @@ class _KeyValueStoreModel(ModelBase):
    int_value: Mapped[Optional[int]]


class KeyValueStore():
class KeyValueStore:
    """
    Generic bot-wide, persistent key-value store
    Can be used to store generic values, e.g. very first bot startup time.

@ -11,7 +11,7 @@ from freqtrade.persistence.models import PairLock
logger = logging.getLogger(__name__)


class PairLocks():
class PairLocks:
    """
    Pairlocks middleware class
    Abstracts the database layer away so it becomes optional - which will be necessary to support
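The recurring `class Foo():` to `class Foo:` change in this commit is purely cosmetic: empty parentheses add nothing in Python 3. A quick check:

``` python
class WithParens():  # legacy spelling
    pass

class WithoutParens:  # the spelling this commit standardizes on
    pass

# Both are ordinary classes inheriting from object; behavior is identical.
assert WithParens.__bases__ == WithoutParens.__bases__ == (object,)
```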
@ -10,6 +10,7 @@ from typing import Any, ClassVar, Dict, List, Optional, Sequence, cast
from sqlalchemy import (Enum, Float, ForeignKey, Integer, ScalarResult, Select, String,
                        UniqueConstraint, desc, func, select)
from sqlalchemy.orm import Mapped, lazyload, mapped_column, relationship, validates
from typing_extensions import Self

from freqtrade.constants import (CUSTOM_TAG_MAX_LENGTH, DATETIME_PRINT_FORMAT, MATH_CLOSE_PREC,
                                 NON_OPEN_EXCHANGE_STATES, BuySell, LongShort)

@ -246,15 +247,15 @@ class Order(ModelBase):
        else:
            logger.warning(f"Did not find order for {order}.")

    @staticmethod
    @classmethod
    def parse_from_ccxt_object(
            order: Dict[str, Any], pair: str, side: str,
            amount: Optional[float] = None, price: Optional[float] = None) -> 'Order':
            cls, order: Dict[str, Any], pair: str, side: str,
            amount: Optional[float] = None, price: Optional[float] = None) -> Self:
        """
        Parse an order from a ccxt object and return a new order Object.
        Optional support for overriding amount and price is only used for test simplification.
        """
        o = Order(
        o = cls(
            order_id=str(order['id']),
            ft_order_side=side,
            ft_pair=pair,
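Swapping `@staticmethod` for `@classmethod` with a `Self` return type means any subclass of `Order` gets instances of its own type back from the parser. A minimal sketch of the pattern (the class names here are hypothetical stand-ins):

``` python
from typing_extensions import Self  # part of `typing` itself on Python 3.11+

class Base:
    @classmethod
    def parse(cls, payload: dict) -> Self:
        # cls instead of a hard-coded Base() - subclasses construct themselves
        return cls()

class Child(Base):
    pass

# A @staticmethod returning Base() could not produce a Child here.
assert type(Child.parse({})) is Child
```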
@ -282,7 +283,7 @@ class Order(ModelBase):
        return Order.session.scalars(select(Order).filter(Order.order_id == order_id)).first()


class LocalTrade():
class LocalTrade:
    """
    Trade database model.
    Used in backtesting - must be aligned to Trade model!

@ -1641,8 +1642,8 @@ class Trade(ModelBase, LocalTrade):
            )).scalar_one()
        return trading_volume

    @staticmethod
    def from_json(json_str: str) -> 'Trade':
    @classmethod
    def from_json(cls, json_str: str) -> Self:
        """
        Create a Trade instance from a json string.

@ -1652,7 +1653,7 @@ class Trade(ModelBase, LocalTrade):
        """
        import rapidjson
        data = rapidjson.loads(json_str)
        trade = Trade(
        trade = cls(
            id=data["trade_id"],
            pair=data["pair"],
            base_currency=data["base_currency"],
@ -13,9 +13,8 @@ from freqtrade.constants import Config, ListPairsWithTimeframes
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_minutes, timeframe_to_prev_date
from freqtrade.exchange.types import Tickers
from freqtrade.misc import format_ms_time
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
from freqtrade.util import dt_now
from freqtrade.util import dt_now, format_ms_time


logger = logging.getLogger(__name__)
@ -15,7 +15,7 @@ from freqtrade.resolvers import ProtectionResolver
logger = logging.getLogger(__name__)


class ProtectionManager():
class ProtectionManager:

    def __init__(self, config: Config, protections: List) -> None:
        self._config = config
@ -7,12 +7,14 @@ from fastapi.websockets import WebSocket
from pydantic import ValidationError

from freqtrade.enums import RPCMessageType, RPCRequestType
from freqtrade.exceptions import FreqtradeException
from freqtrade.rpc.api_server.api_auth import validate_ws_token
from freqtrade.rpc.api_server.deps import get_message_stream, get_rpc
from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel
from freqtrade.rpc.api_server.ws.message_stream import MessageStream
from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSMessageSchema,
                                                 WSRequestSchema, WSWhitelistMessage)
from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSErrorMessage,
                                                 WSMessageSchema, WSRequestSchema,
                                                 WSWhitelistMessage)
from freqtrade.rpc.rpc import RPC


@ -27,7 +29,13 @@ async def channel_reader(channel: WebSocketChannel, rpc: RPC):
    Iterate over the messages from the channel and process the request
    """
    async for message in channel:
        await _process_consumer_request(message, channel, rpc)
        try:
            await _process_consumer_request(message, channel, rpc)
        except FreqtradeException:
            logger.exception(f"Error processing request from {channel}")
            response = WSErrorMessage(data='Error processing request')

            await channel.send(response.dict(exclude_none=True))


async def channel_broadcaster(channel: WebSocketChannel, message_stream: MessageStream):
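The new `try`/`except FreqtradeException` wrapper makes the reader loop fault-tolerant: one bad request is logged and answered with an error payload instead of tearing down the websocket. The shape of that pattern as a self-contained sketch (`AppError` and `process` are illustrative stand-ins):

``` python
import asyncio
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("reader")

class AppError(Exception):
    """Stand-in for FreqtradeException."""

async def process(message: dict) -> None:
    if message.get("type") == "bad":
        raise AppError("cannot handle this request")

async def reader(messages) -> None:
    async for message in messages:
        try:
            await process(message)
        except AppError:
            # log with traceback, keep serving subsequent messages
            logger.exception("Error processing request")

async def main() -> None:
    async def stream():
        for m in ({"type": "ok"}, {"type": "bad"}, {"type": "ok"}):
            yield m
    await reader(stream())

asyncio.run(main())
```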
@ -62,13 +70,13 @@ async def _process_consumer_request(
        logger.error(f"Invalid request from {channel}: {e}")
        return

    type, data = websocket_request.type, websocket_request.data
    type_, data = websocket_request.type, websocket_request.data
    response: WSMessageSchema

    logger.debug(f"Request of type {type} from {channel}")
    logger.debug(f"Request of type {type_} from {channel}")

    # If we have a request of type SUBSCRIBE, set the topics in this channel
    if type == RPCRequestType.SUBSCRIBE:
    if type_ == RPCRequestType.SUBSCRIBE:
        # If the request is empty, do nothing
        if not data:
            return

@ -80,7 +88,7 @@ async def _process_consumer_request(
        # We don't send a response for subscriptions
        return

    elif type == RPCRequestType.WHITELIST:
    elif type_ == RPCRequestType.WHITELIST:
        # Get whitelist
        whitelist = rpc._ws_request_whitelist()

@ -88,7 +96,7 @@ async def _process_consumer_request(
        response = WSWhitelistMessage(data=whitelist)
        await channel.send(response.dict(exclude_none=True))

    elif type == RPCRequestType.ANALYZED_DF:
    elif type_ == RPCRequestType.ANALYZED_DF:
        # Limit the amount of candles per dataframe to 'limit' or 1500
        limit = int(min(data.get('limit', 1500), 1500)) if data else None
        pair = data.get('pair', None) if data else None
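The `type` to `type_` rename exists because binding a local named `type` shadows the builtin for the rest of the function; PEP 8 recommends the trailing underscore for exactly this case. In isolation:

``` python
def handle(request: dict) -> str:
    type_ = request["type"]  # a bare `type` here would shadow the builtin
    assert type(type_) is str  # builtin still usable for checks like this
    return type_

print(handle({"type": "subscribe"}))  # subscribe
```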
@ -14,7 +14,7 @@ class JobsContainer(TypedDict):
    error: Optional[str]


class ApiBG():
class ApiBG:
    # Backtesting type: Backtesting
    bt: Dict[str, Any] = {
        'bt': None,

@ -66,4 +66,9 @@ class WSAnalyzedDFMessage(WSMessageSchema):
    type: RPCMessageType = RPCMessageType.ANALYZED_DF
    data: AnalyzedDFData


class WSErrorMessage(WSMessageSchema):
    type: RPCMessageType = RPCMessageType.EXCEPTION
    data: str

# --------------------------------------------------------------------------
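`WSErrorMessage` is an ordinary pydantic v1 model, and the `.dict(exclude_none=True)` call used by `channel_reader` above drops unset optional fields from the payload. A standalone approximation of the schema family:

``` python
from typing import Optional
from pydantic import BaseModel  # pydantic 1.x, as pinned in this commit

class WSMessage(BaseModel):
    type: str
    data: Optional[str] = None

class WSError(WSMessage):
    type: str = "exception"

print(WSError(data="Error processing request").dict(exclude_none=True))
# {'type': 'exception', 'data': 'Error processing request'}
```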
@ -168,7 +168,7 @@ class IStrategy(ABC, HyperStrategyMixin):
            download_all_data_for_training(self.dp, self.config)
        else:
            # Gracious failures if freqAI is disabled but "start" is called.
            class DummyClass():
            class DummyClass:
                def start(self, *args, **kwargs):
                    raise OperationalException(
                        'freqAI is not enabled. '
@ -1,5 +1,5 @@
from freqtrade.util.datetime_helpers import (dt_floor_day, dt_from_ts, dt_humanize, dt_now, dt_ts,
                                             dt_utc, shorten_date)
                                             dt_utc, format_ms_time, shorten_date)
from freqtrade.util.ft_precise import FtPrecise
from freqtrade.util.periodic_cache import PeriodicCache

@ -7,11 +7,12 @@ from freqtrade.util.periodic_cache import PeriodicCache
__all__ = [
    'dt_floor_day',
    'dt_from_ts',
    'dt_humanize',
    'dt_now',
    'dt_ts',
    'dt_utc',
    'dt_humanize',
    'shorten_date',
    'format_ms_time',
    'FtPrecise',
    'PeriodicCache',
    'shorten_date',
]

@ -61,3 +61,11 @@ def dt_humanize(dt: datetime, **kwargs) -> str:
    :param kwargs: kwargs to pass to arrow's humanize()
    """
    return arrow.get(dt).humanize(**kwargs)


def format_ms_time(date: int) -> str:
    """
    convert MS date to readable format.
    : epoch-string in ms
    """
    return datetime.fromtimestamp(date / 1000.0).strftime('%Y-%m-%dT%H:%M:%S')
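`format_ms_time` divides by 1000 because exchange timestamps are epoch milliseconds; note that `datetime.fromtimestamp` without a `tz` argument renders in the local timezone, which is why the relocated `test_format_ms_time` later in this diff converts its expected value with `astimezone(None)`. Standalone:

``` python
from datetime import datetime

def format_ms_time(date: int) -> str:
    # copy of the helper above: ms -> seconds, then a local-time string
    return datetime.fromtimestamp(date / 1000.0).strftime('%Y-%m-%dT%H:%M:%S')

print(format_ms_time(1523383321000))  # 2018-04-10T18:02:01 on a UTC machine
```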
@ -7,10 +7,10 @@
-r docs/requirements-docs.txt

coveralls==3.3.1
ruff==0.0.272
mypy==1.3.0
ruff==0.0.275
mypy==1.4.1
pre-commit==3.3.3
pytest==7.3.2
pytest==7.4.0
pytest-asyncio==0.21.0
pytest-cov==4.1.0
pytest-mock==3.11.1

@ -20,7 +20,7 @@ isort==5.12.0
time-machine==2.10.0

# Convert jupyter notebooks to markdown documents
nbconvert==7.5.0
nbconvert==7.6.0

# mypy types
types-cachetools==5.3.0.5

@ -5,7 +5,7 @@
torch==2.0.1
# until these branches will be released we can use this
gymnasium==0.28.1
stable_baselines3==2.0.0a13
stable_baselines3==2.0.0
sb3_contrib>=2.0.0a9
# Progress bar for stable-baselines3 and sb3-contrib
tqdm==4.65.0

@ -9,4 +9,4 @@ catboost==1.2; 'arm' not in platform_machine
lightgbm==3.3.5
xgboost==1.7.6
tensorboard==2.13.0
datasieve==0.1.5
datasieve==0.1.7

@ -2,7 +2,8 @@
-r requirements.txt

# Required for hyperopt
scipy==1.10.1
scipy==1.11.1; python_version >= '3.9'
scipy==1.10.1; python_version < '3.9'
scikit-learn==1.1.3
scikit-optimize==0.9.0
filelock==3.12.2
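The split scipy pin works through PEP 508 environment markers: pip evaluates the `python_version` clause against the running interpreter at install time and installs whichever line matches. The `packaging` library (pinned elsewhere in this diff) exposes the same evaluation:

``` python
from packaging.markers import Marker

marker = Marker("python_version >= '3.9'")
# pip performs this check per requirement line during installation
print(marker.evaluate())  # True on Python 3.9+, False on 3.8
```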
@ -1,13 +1,13 @@
numpy==1.25.0; python_version > '3.8'
numpy==1.24.3; python_version <= '3.8'
pandas==2.0.2
pandas==2.0.3
pandas-ta==0.3.14b

ccxt==3.1.44
ccxt==4.0.12
cryptography==41.0.1; platform_machine != 'armv7l'
cryptography==40.0.1; platform_machine == 'armv7l'
aiohttp==3.8.4
SQLAlchemy==2.0.16
SQLAlchemy==2.0.17
python-telegram-bot==20.3
# can't be hard-pinned due to telegram-bot pinning httpx with ~
httpx>=0.24.1

@ -39,7 +39,7 @@ orjson==3.9.1
sdnotify==0.3.2

# API Server
fastapi==0.97.0
fastapi==0.99.1
pydantic==1.10.9
uvicorn==0.22.0
pyjwt==2.7.0

@ -61,5 +61,5 @@ schedule==1.2.0
websockets==11.0.3
janus==1.0.0

ast-comments==1.0.1
ast-comments==1.1.0
packaging==23.1
@ -29,7 +29,7 @@ logging.basicConfig(
logger = logging.getLogger("ft_rest_client")


class FtRestClient():
class FtRestClient:

    def __init__(self, serverurl, username=None, password=None):
@ -43,6 +43,7 @@ EXCHANGES = {
        'hasQuoteVolumeFutures': True,
        'leverage_tiers_public': False,
        'leverage_in_spot_market': False,
        'trades_lookback_hours': 4,
        'private_methods': [
            'fapiPrivateGetPositionSideDual',
            'fapiPrivateGetMultiAssetsMargin'

@ -98,6 +99,7 @@ EXCHANGES = {
        'timeframe': '1h',
        'leverage_tiers_public': False,
        'leverage_in_spot_market': True,
        'trades_lookback_hours': 12,
    },
    'kucoin': {
        'pair': 'XRP/USDT',

@ -342,7 +344,7 @@ def exchange_futures(request, exchange_conf, class_mocker):


@pytest.mark.longrun
class TestCCXTExchange():
class TestCCXTExchange:

    def test_load_markets(self, exchange: EXCHANGE_FIXTURE_TYPE):
        exch, exchangename = exchange
@ -640,7 +642,21 @@ class TestCCXTExchange():
        assert isinstance(funding_fee, float)
        # assert funding_fee > 0

    # TODO: tests fetch_trades (?)
    def test_ccxt__async_get_trade_history(self, exchange: EXCHANGE_FIXTURE_TYPE):
        exch, exchangename = exchange
        if not (lookback := EXCHANGES[exchangename].get('trades_lookback_hours')):
            pytest.skip('test_fetch_trades not enabled for this exchange')
        pair = EXCHANGES[exchangename]['pair']
        since = int((datetime.now(timezone.utc) - timedelta(hours=lookback)).timestamp() * 1000)
        res = exch.loop.run_until_complete(
            exch._async_get_trade_history(pair, since, None, None)
        )
        assert len(res) == 2
        res_pair, res_trades = res
        assert res_pair == pair
        assert isinstance(res_trades, list)
        assert res_trades[0][0] >= since
        assert len(res_trades) > 1200

    def test_ccxt_get_fee(self, exchange: EXCHANGE_FIXTURE_TYPE):
        exch, exchangename = exchange
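The new test leans on the walrus operator: `if not (lookback := ...)` binds the config value and tests it in one expression, so exchanges without `trades_lookback_hours` are skipped before any network call. The guard in isolation (`CONFIG` is a made-up stand-in for `EXCHANGES`):

``` python
from typing import Optional

CONFIG = {"binance": {"trades_lookback_hours": 4}, "kucoin": {}}

def lookback_for(name: str) -> Optional[int]:
    if not (lookback := CONFIG[name].get("trades_lookback_hours")):
        return None  # the test calls pytest.skip() at this point
    return lookback

assert lookback_for("binance") == 4
assert lookback_for("kucoin") is None
```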
@ -1437,9 +1437,11 @@ def test_backtest_start_multi_strat(default_conf, mocker, caplog, testdatadir):
    strattable_mock = MagicMock()
    strat_summary = MagicMock()

    mocker.patch.multiple('freqtrade.optimize.optimize_reports',
    mocker.patch.multiple('freqtrade.optimize.optimize_reports.bt_output',
                          text_table_bt_results=text_table_mock,
                          text_table_strategy=strattable_mock,
                          )
    mocker.patch.multiple('freqtrade.optimize.optimize_reports.optimize_reports',
                          generate_pair_metrics=MagicMock(),
                          generate_exit_reason_stats=sell_reason_mock,
                          generate_strategy_comparison=strat_summary,
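These test updates track the optimize_reports package split: `mocker.patch` must target the module where a name is looked up at call time, so when the table helpers moved into `bt_output`, the patch paths moved with them. The rule in miniature, using the standard library for illustration:

``` python
from unittest.mock import patch
import json

def dumps_twice(obj) -> str:
    # resolves `dumps` through the json module attribute at call time
    return json.dumps(obj) + json.dumps(obj)

with patch("json.dumps", return_value="X"):
    assert dumps_twice({}) == "XX"  # patching the lookup location works
print(dumps_twice({}))  # {}{} - the original is restored outside the context
```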
@ -14,15 +14,16 @@ from freqtrade.data.btanalysis import (get_latest_backtest_filename, load_backte
                                       load_backtest_stats)
from freqtrade.edge import PairInfo
from freqtrade.enums import ExitType
from freqtrade.optimize.optimize_reports import (_get_resample_from_period, generate_backtest_stats,
                                                 generate_daily_stats, generate_edge_table,
                                                 generate_exit_reason_stats, generate_pair_metrics,
from freqtrade.optimize.optimize_reports import (generate_backtest_stats, generate_daily_stats,
                                                 generate_edge_table, generate_exit_reason_stats,
                                                 generate_pair_metrics,
                                                 generate_periodic_breakdown_stats,
                                                 generate_strategy_comparison,
                                                 generate_trading_stats, show_sorted_pairlist,
                                                 store_backtest_analysis_results,
                                                 store_backtest_stats, text_table_bt_results,
                                                 text_table_exit_reason, text_table_strategy)
from freqtrade.optimize.optimize_reports.optimize_reports import _get_resample_from_period
from freqtrade.resolvers.strategy_resolver import StrategyResolver
from freqtrade.util import dt_ts
from freqtrade.util.datetime_helpers import dt_from_ts, dt_utc

@ -209,7 +210,7 @@ def test_generate_backtest_stats(default_conf, testdatadir, tmpdir):

def test_store_backtest_stats(testdatadir, mocker):

    dump_mock = mocker.patch('freqtrade.optimize.optimize_reports.file_dump_json')
    dump_mock = mocker.patch('freqtrade.optimize.optimize_reports.bt_storage.file_dump_json')

    store_backtest_stats(testdatadir, {'metadata': {}}, '2022_01_01_15_05_13')

@ -228,7 +229,8 @@ def test_store_backtest_stats(testdatadir, mocker):

def test_store_backtest_candles(testdatadir, mocker):

    dump_mock = mocker.patch('freqtrade.optimize.optimize_reports.file_dump_joblib')
    dump_mock = mocker.patch(
        'freqtrade.optimize.optimize_reports.bt_storage.file_dump_joblib')

    candle_dict = {'DefStrat': {'UNITTEST/BTC': pd.DataFrame()}}
@ -1605,13 +1605,13 @@ def test_create_stoploss_order_insufficient_funds(
    assert mock_insuf.call_count == 1


@pytest.mark.parametrize("is_short,bid,ask,stop_price,amt,hang_price", [
    (False, [4.38, 4.16], [4.4, 4.17], ['2.0805', 4.4 * 0.95], 27.39726027, 3),
    (True, [1.09, 1.21], [1.1, 1.22], ['2.321', 1.09 * 1.05], 27.27272727, 1.5),
@pytest.mark.parametrize("is_short,bid,ask,stop_price,hang_price", [
    (False, [4.38, 4.16], [4.4, 4.17], ['2.0805', 4.4 * 0.95], 3),
    (True, [1.09, 1.21], [1.1, 1.22], ['2.321', 1.09 * 1.05], 1.5),
])
@pytest.mark.usefixtures("init_persistence")
def test_handle_stoploss_on_exchange_trailing(
    mocker, default_conf_usdt, fee, is_short, bid, ask, limit_order, stop_price, amt, hang_price
    mocker, default_conf_usdt, fee, is_short, bid, ask, limit_order, stop_price, hang_price
) -> None:
    # When trailing stoploss is set
    enter_order = limit_order[entry_side(is_short)]

@ -1626,8 +1626,8 @@ def test_handle_stoploss_on_exchange_trailing(
            'last': 2.19,
        }),
        create_order=MagicMock(side_effect=[
            {'id': enter_order['id']},
            {'id': exit_order['id']},
            enter_order,
            exit_order,
        ]),
        get_fee=fee,
    )

@ -1723,7 +1723,7 @@ def test_handle_stoploss_on_exchange_trailing(

    cancel_order_mock.assert_called_once_with('100', 'ETH/USDT')
    stoploss_order_mock.assert_called_once_with(
        amount=pytest.approx(amt),
        amount=30,
        pair='ETH/USDT',
        order_types=freqtrade.strategy.order_types,
        stop_price=stop_price[1],

@ -1992,7 +1992,7 @@ def test_tsl_on_exchange_compatible_with_edge(mocker, edge_conf, fee, limit_orde

    enter_order = limit_order['buy']
    exit_order = limit_order['sell']

    enter_order['average'] = 2.19
    # When trailing stoploss is set
    stoploss = MagicMock(return_value={'id': '13434334', 'status': 'open'})
    patch_RPCManager(mocker)

@ -2009,8 +2009,8 @@ def test_tsl_on_exchange_compatible_with_edge(mocker, edge_conf, fee, limit_orde
            'last': 2.19
        }),
        create_order=MagicMock(side_effect=[
            {'id': enter_order['id']},
            {'id': exit_order['id']},
            enter_order,
            exit_order,
        ]),
        get_fee=fee,
        create_stoploss=stoploss,

@ -2106,7 +2106,7 @@ def test_tsl_on_exchange_compatible_with_edge(mocker, edge_conf, fee, limit_orde
    assert trade.stop_loss == 4.4 * 0.99
    cancel_order_mock.assert_called_once_with('100', 'NEO/BTC')
    stoploss_order_mock.assert_called_once_with(
        amount=pytest.approx(11.41438356),
        amount=30,
        pair='NEO/BTC',
        order_types=freqtrade.strategy.order_types,
        stop_price=4.4 * 0.99,

@ -2976,11 +2976,12 @@ def test_manage_open_orders_exit_usercustom(
) -> None:
    default_conf_usdt["unfilledtimeout"] = {"entry": 1440, "exit": 1440, "exit_timeout_count": 1}
    open_trade_usdt.open_order_id = limit_sell_order_old['id']
    order = Order.parse_from_ccxt_object(limit_sell_order_old, 'mocked', 'sell')
    open_trade_usdt.orders[0] = order
    if is_short:
        limit_sell_order_old['side'] = 'buy'
        open_trade_usdt.is_short = is_short
    open_exit_order = Order.parse_from_ccxt_object(limit_sell_order_old, 'mocked',
                                                   'buy' if is_short else 'sell')
    open_trade_usdt.orders[-1] = open_exit_order

    rpc_mock = patch_RPCManager(mocker)
    cancel_order_mock = MagicMock()

@ -3011,8 +3012,8 @@ def test_manage_open_orders_exit_usercustom(
    freqtrade.manage_open_orders()
    assert cancel_order_mock.call_count == 0
    assert rpc_mock.call_count == 1
    assert freqtrade.strategy.check_exit_timeout.call_count == (0 if is_short else 1)
    assert freqtrade.strategy.check_entry_timeout.call_count == (1 if is_short else 0)
    assert freqtrade.strategy.check_exit_timeout.call_count == 1
    assert freqtrade.strategy.check_entry_timeout.call_count == 0

    freqtrade.strategy.check_exit_timeout = MagicMock(side_effect=KeyError)
    freqtrade.strategy.check_entry_timeout = MagicMock(side_effect=KeyError)

@ -3020,8 +3021,8 @@ def test_manage_open_orders_exit_usercustom(
    freqtrade.manage_open_orders()
    assert cancel_order_mock.call_count == 0
    assert rpc_mock.call_count == 1
    assert freqtrade.strategy.check_exit_timeout.call_count == (0 if is_short else 1)
    assert freqtrade.strategy.check_entry_timeout.call_count == (1 if is_short else 0)
    assert freqtrade.strategy.check_exit_timeout.call_count == 1
    assert freqtrade.strategy.check_entry_timeout.call_count == 0

    # Return True - sells!
    freqtrade.strategy.check_exit_timeout = MagicMock(return_value=True)

@ -3029,8 +3030,8 @@ def test_manage_open_orders_exit_usercustom(
    freqtrade.manage_open_orders()
    assert cancel_order_mock.call_count == 1
    assert rpc_mock.call_count == 2
    assert freqtrade.strategy.check_exit_timeout.call_count == (0 if is_short else 1)
    assert freqtrade.strategy.check_entry_timeout.call_count == (1 if is_short else 0)
    assert freqtrade.strategy.check_exit_timeout.call_count == 1
    assert freqtrade.strategy.check_entry_timeout.call_count == 0
    trade = Trade.session.scalars(select(Trade)).first()
    # cancelling didn't succeed - order-id remains open.
    assert trade.open_order_id is not None
@ -1,6 +1,5 @@
# pragma pylint: disable=missing-docstring,C0103

import datetime
from copy import deepcopy
from pathlib import Path
from unittest.mock import MagicMock

@ -9,7 +8,7 @@ import pandas as pd
import pytest

from freqtrade.misc import (dataframe_to_json, decimals_per_coin, deep_merge_dicts, file_dump_json,
                            file_load_json, format_ms_time, json_to_dataframe, pair_to_filename,
                            file_load_json, json_to_dataframe, pair_to_filename,
                            parse_db_uri_for_logging, plural, render_template,
                            render_template_with_fallback, round_coin_value, safe_value_fallback,
                            safe_value_fallback2)

@ -91,19 +90,6 @@ def test_pair_to_filename(pair, expected_result):
    assert pair_s == expected_result


def test_format_ms_time() -> None:
    # Date 2018-04-10 18:02:01
    date_in_epoch_ms = 1523383321000
    date = format_ms_time(date_in_epoch_ms)
    assert type(date) is str
    res = datetime.datetime(2018, 4, 10, 18, 2, 1, tzinfo=datetime.timezone.utc)
    assert date == res.astimezone(None).strftime('%Y-%m-%dT%H:%M:%S')
    res = datetime.datetime(2017, 12, 13, 8, 2, 1, tzinfo=datetime.timezone.utc)
    # Date 2017-12-13 08:02:01
    date_in_epoch_ms = 1513152121000
    assert format_ms_time(date_in_epoch_ms) == res.astimezone(None).strftime('%Y-%m-%dT%H:%M:%S')


def test_safe_value_fallback():
    dict1 = {'keya': None, 'keyb': 2, 'keyc': 5, 'keyd': None}
    assert safe_value_fallback(dict1, 'keya', 'keyb') == 2
@ -3,8 +3,8 @@ from datetime import datetime, timedelta, timezone
import pytest
import time_machine

from freqtrade.util import dt_floor_day, dt_from_ts, dt_now, dt_ts, dt_utc, shorten_date
from freqtrade.util.datetime_helpers import dt_humanize
from freqtrade.util import (dt_floor_day, dt_from_ts, dt_humanize, dt_now, dt_ts, dt_utc,
                            format_ms_time, shorten_date)


def test_dt_now():

@ -57,3 +57,16 @@ def test_dt_humanize() -> None:
    assert dt_humanize(dt_now()) == 'just now'
    assert dt_humanize(dt_now(), only_distance=True) == 'instantly'
    assert dt_humanize(dt_now() - timedelta(hours=16), only_distance=True) == '16 hours'


def test_format_ms_time() -> None:
    # Date 2018-04-10 18:02:01
    date_in_epoch_ms = 1523383321000
    date = format_ms_time(date_in_epoch_ms)
    assert type(date) is str
    res = datetime(2018, 4, 10, 18, 2, 1, tzinfo=timezone.utc)
    assert date == res.astimezone(None).strftime('%Y-%m-%dT%H:%M:%S')
    res = datetime(2017, 12, 13, 8, 2, 1, tzinfo=timezone.utc)
    # Date 2017-12-13 08:02:01
    date_in_epoch_ms = 1513152121000
    assert format_ms_time(date_in_epoch_ms) == res.astimezone(None).strftime('%Y-%m-%dT%H:%M:%S')