Mirror of https://github.com/freqtrade/freqtrade.git, synced 2024-11-10 10:21:59 +00:00
Merge branch 'freqtrade:develop' into feature/multiple_open_orders
This commit is contained in commit f397d973f3.
.github/workflows/ci.yml (vendored, 7 lines changed)

@@ -160,7 +160,8 @@ jobs:
       - name: Installation - macOS
         if: runner.os == 'macOS'
         run: |
-          brew update
+          # brew update
           # TODO: Should be the brew upgrade
           # homebrew fails to update python due to unlinking failures
           # https://github.com/actions/runner-images/issues/6817
+          rm /usr/local/bin/2to3 || true

@@ -460,7 +461,7 @@ jobs:
           python setup.py sdist bdist_wheel

       - name: Publish to PyPI (Test)
-        uses: pypa/gh-action-pypi-publish@v1.8.6
+        uses: pypa/gh-action-pypi-publish@v1.8.8
         if: (github.event_name == 'release')
         with:
           user: __token__

@@ -468,7 +469,7 @@ jobs:
           repository_url: https://test.pypi.org/legacy/

       - name: Publish to PyPI
-        uses: pypa/gh-action-pypi-publish@v1.8.6
+        uses: pypa/gh-action-pypi-publish@v1.8.8
         if: (github.event_name == 'release')
         with:
           user: __token__

@@ -13,12 +13,12 @@ repos:
     - id: mypy
       exclude: build_helpers
       additional_dependencies:
-        - types-cachetools==5.3.0.5
+        - types-cachetools==5.3.0.6
         - types-filelock==3.2.7
-        - types-requests==2.31.0.1
-        - types-tabulate==0.9.0.2
-        - types-python-dateutil==2.8.19.13
-        - SQLAlchemy==2.0.16
+        - types-requests==2.31.0.2
+        - types-tabulate==0.9.0.3
+        - types-python-dateutil==2.8.19.14
+        - SQLAlchemy==2.0.19
         # stages: [push]

   - repo: https://github.com/pycqa/isort

Binary file not shown. (4 files)

BIN  build_helpers/TA_Lib-0.4.27-cp310-cp310-win_amd64.whl (new file, binary not shown)
BIN  build_helpers/TA_Lib-0.4.27-cp311-cp311-win_amd64.whl (new file, binary not shown)
BIN  build_helpers/TA_Lib-0.4.27-cp38-cp38-win_amd64.whl (new file, binary not shown)
BIN  build_helpers/TA_Lib-0.4.27-cp39-cp39-win_amd64.whl (new file, binary not shown)

@@ -1,21 +1,11 @@
-# Downloads don't work automatically, since the URL is regenerated via javascript.
-# Downloaded from https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib
+# vendored Wheels compiled via https://github.com/xmatthias/ta-lib-python/tree/ta_bundled_040

 python -m pip install --upgrade pip wheel

-$pyv = python -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')"
-
-if ($pyv -eq '3.8') {
-    pip install build_helpers\TA_Lib-0.4.26-cp38-cp38-win_amd64.whl
-}
-if ($pyv -eq '3.9') {
-    pip install build_helpers\TA_Lib-0.4.26-cp39-cp39-win_amd64.whl
-}
-if ($pyv -eq '3.10') {
-    pip install build_helpers\TA_Lib-0.4.26-cp310-cp310-win_amd64.whl
-}
-if ($pyv -eq '3.11') {
-    pip install build_helpers\TA_Lib-0.4.26-cp311-cp311-win_amd64.whl
-}
-
+pip install --find-links=build_helpers\ TA-Lib

 pip install -r requirements-dev.txt
 pip install -e .
Binary file not shown.

@@ -206,6 +206,6 @@
     "recursive_strategy_search": false,
     "add_config_files": [],
     "reduce_df_footprint": false,
-    "dataformat_ohlcv": "json",
-    "dataformat_trades": "jsongz"
+    "dataformat_ohlcv": "feather",
+    "dataformat_trades": "feather"
 }
@@ -32,5 +32,5 @@ services:
       --logfile /freqtrade/user_data/logs/freqtrade.log
       --db-url sqlite:////freqtrade/user_data/tradesv3.sqlite
      --config /freqtrade/user_data/config.json
-      --freqai-model XGBoostClassifier
-      --strategy SampleStrategy
+      --freqaimodel XGBoostRegressor
+      --strategy FreqaiExampleStrategy

@@ -103,6 +103,22 @@ The indicators have to be present in your strategy's main DataFrame (either for
 timeframe or for informative timeframes) otherwise they will simply be ignored in the script
 output.

+There are a range of candle and trade-related fields that are included in the analysis and so are
+automatically accessible by including them on the indicator-list. These include:
+
+- **open_date :** trade open datetime
+- **close_date :** trade close datetime
+- **min_rate :** minimum price seen throughout the position
+- **max_rate :** maximum price seen throughout the position
+- **open :** signal candle open price
+- **close :** signal candle close price
+- **high :** signal candle high price
+- **low :** signal candle low price
+- **volume :** signal candle volume
+- **profit_ratio :** trade profit ratio
+- **profit_abs :** absolute profit return of the trade
+
 ### Filtering the trade output by date

 To show only trades between dates within your backtested timerange, supply the usual `timerange` option in `YYYYMMDD-[YYYYMMDD]` format:

@@ -136,7 +136,7 @@ class MyAwesomeStrategy(IStrategy):

 ### Dynamic parameters

-Parameters can also be defined dynamically, but must be available to the instance once the * [`bot_start()` callback](strategy-callbacks.md#bot-start) has been called.
+Parameters can also be defined dynamically, but must be available to the instance once the [`bot_start()` callback](strategy-callbacks.md#bot-start) has been called.

 ``` python

@@ -305,7 +305,7 @@ A backtesting result will look like that:
 | Sharpe                      | 2.97          |
 | Calmar                      | 6.29          |
 | Profit factor               | 1.11          |
-| Expectancy                  | -0.15         |
+| Expectancy (Ratio)          | -0.15 (-0.05) |
 | Avg. stake amount           | 0.001 BTC     |
 | Total trade volume          | 0.429 BTC     |
 |                             |               |

@@ -324,6 +324,7 @@ A backtesting result will look like that:
 | Days win/draw/lose          | 12 / 82 / 25  |
 | Avg. Duration Winners       | 4:23:00       |
 | Avg. Duration Loser         | 6:55:00       |
+| Max Consecutive Wins / Loss | 3 / 4         |
 | Rejected Entry signals      | 3089          |
 | Entry/Exit Timeouts         | 0 / 0         |
 | Canceled Trade Entries      | 34            |

@@ -409,7 +410,7 @@ It contains some useful key metrics about performance of your strategy on backtesting data.
 | Sharpe                      | 2.97          |
 | Calmar                      | 6.29          |
 | Profit factor               | 1.11          |
-| Expectancy                  | -0.15         |
+| Expectancy (Ratio)          | -0.15 (-0.05) |
 | Avg. stake amount           | 0.001 BTC     |
 | Total trade volume          | 0.429 BTC     |
 |                             |               |

@@ -428,6 +429,7 @@ It contains some useful key metrics about performance of your strategy on backtesting data.
 | Days win/draw/lose          | 12 / 82 / 25  |
 | Avg. Duration Winners       | 4:23:00       |
 | Avg. Duration Loser         | 6:55:00       |
+| Max Consecutive Wins / Loss | 3 / 4         |
 | Rejected Entry signals      | 3089          |
 | Entry/Exit Timeouts         | 0 / 0         |
 | Canceled Trade Entries      | 34            |

@@ -467,6 +469,7 @@ It contains some useful key metrics about performance of your strategy on backtesting data.
 - `Best day` / `Worst day`: Best and worst day based on daily profit.
 - `Days win/draw/lose`: Winning / Losing days (draws are usually days without closed trade).
 - `Avg. Duration Winners` / `Avg. Duration Loser`: Average durations for winning and losing trades.
+- `Max Consecutive Wins / Loss`: Maximum consecutive wins/losses in a row.
 - `Rejected Entry signals`: Trade entry signals that could not be acted upon due to `max_open_trades` being reached.
 - `Entry/Exit Timeouts`: Entry/exit orders which did not fill (only applicable if custom pricing is used).
 - `Canceled Trade Entries`: Number of trades that have been canceled by user request via `adjust_entry_price`.

@@ -534,6 +537,7 @@ Since backtesting lacks some detailed information about what happens within a candle
 - ROI
   - exits are compared to high - but the ROI value is used (e.g. ROI = 2%, high=5% - so the exit will be at 2%)
   - exits are never "below the candle", so a ROI of 2% may result in an exit at 2.4% if low was at 2.4% profit
   - ROI entries which came into effect on the triggering candle (e.g. `120: 0.02` for 1h candles, from `60: 0.05`) will use the candle's open as exit rate
+  - Force-exits caused by `<N>=-1` ROI entries use low as exit value, unless N falls on the candle open (e.g. `120: -1` for 1h candles)
 - Stoploss exits happen exactly at stoploss price, even if low was lower, but the loss will be `2 * fees` higher than the stoploss price
 - Stoploss is evaluated before ROI within one candle. So you can often see more trades with the `stoploss` exit reason comparing to the results obtained with the same strategy in the Dry Run/Live Trade modes
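
To make the ROI caveats above concrete, here is a tiny illustrative sketch (plain Python, not freqtrade code) of the "never below the candle" rule:

```python
# Illustrative only: backtesting cannot exit below the candle's low.
roi_target_profit = 0.02     # ROI configured to exit at 2% profit
candle_low_profit = 0.024    # the candle's low never dropped below 2.4% profit

# The simulated exit is the better of the two: 2.4%, not 2%.
exit_profit = max(roi_target_profit, candle_low_profit)
assert exit_profit == 0.024
```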

@@ -251,8 +251,8 @@ Mandatory parameters are marked as **Required**, which means that they are required
 | `db_url` | Declares database URL to use. NOTE: This defaults to `sqlite:///tradesv3.dryrun.sqlite` if `dry_run` is `true`, and to `sqlite:///tradesv3.sqlite` for production instances. <br> **Datatype:** String, SQLAlchemy connect string
 | `logfile` | Specifies logfile name. Uses a rolling strategy for log file rotation for 10 files with the 1MB limit per file. <br> **Datatype:** String
 | `add_config_files` | Additional config files. These files will be loaded and merged with the current config file. The files are resolved relative to the initial file.<br> *Defaults to `[]`*. <br> **Datatype:** List of strings
-| `dataformat_ohlcv` | Data format to use to store historical candle (OHLCV) data. <br> *Defaults to `json`*. <br> **Datatype:** String
-| `dataformat_trades` | Data format to use to store historical trades data. <br> *Defaults to `jsongz`*. <br> **Datatype:** String
+| `dataformat_ohlcv` | Data format to use to store historical candle (OHLCV) data. <br> *Defaults to `feather`*. <br> **Datatype:** String
+| `dataformat_trades` | Data format to use to store historical trades data. <br> *Defaults to `feather`*. <br> **Datatype:** String
 | `reduce_df_footprint` | Recast all numeric columns to float32/int32, with the objective of reducing ram/disk usage (and decreasing train/inference timing in FreqAI). (Currently only affects FreqAI use-cases) <br> **Datatype:** Boolean. <br> Default: `False`.

 ### Parameters in the strategy

@@ -682,16 +682,14 @@ To use a proxy for exchange connections - you will have to define the proxies as
 {
   "exchange": {
     "ccxt_config": {
-      "aiohttp_proxy": "http://addr:port",
-      "proxies": {
-        "http": "http://addr:port",
-        "https": "http://addr:port"
-      },
+      "httpsProxy": "http://addr:port",
     }
   }
 }
 ```

+For more information on available proxy types, please consult the [ccxt proxy documentation](https://docs.ccxt.com/#/README?id=proxy).
+
 ## Next step

 Now you have configured your config.json, the next step is to [start your bot](bot-usage.md).

@@ -27,11 +27,11 @@ usage: freqtrade download-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
                                [--exchange EXCHANGE]
                                [-t TIMEFRAMES [TIMEFRAMES ...]] [--erase]
                                [--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
-                               [--data-format-trades {json,jsongz,hdf5}]
+                               [--data-format-trades {json,jsongz,hdf5,feather}]
                                [--trading-mode {spot,margin,futures}]
                                [--prepend]

-optional arguments:
+options:
   -h, --help            show this help message and exit
   -p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
                         Limit command to these pairs. Pairs are space-

@@ -48,8 +48,7 @@ optional arguments:
   --dl-trades           Download trades instead of OHLCV data. The bot will
                         resample trades to the desired timeframe as specified
                         as --timeframes/-t.
-  --exchange EXCHANGE   Exchange name (default: `bittrex`). Only valid if no
-                        config is provided.
+  --exchange EXCHANGE   Exchange name. Only valid if no config is provided.
   -t TIMEFRAMES [TIMEFRAMES ...], --timeframes TIMEFRAMES [TIMEFRAMES ...]
                         Specify which tickers to download. Space-separated
                         list. Default: `1m 5m`.

@@ -57,17 +56,18 @@ optional arguments:
                         exchange/pairs/timeframes.
   --data-format-ohlcv {json,jsongz,hdf5,feather,parquet}
                         Storage format for downloaded candle (OHLCV) data.
-                        (default: `json`).
-  --data-format-trades {json,jsongz,hdf5}
+                        (default: `feather`).
+  --data-format-trades {json,jsongz,hdf5,feather}
                         Storage format for downloaded trades data. (default:
-                        `jsongz`).
+                        `feather`).
   --trading-mode {spot,margin,futures}, --tradingmode {spot,margin,futures}
                         Select Trading mode
   --prepend             Allow data prepending. (Data-appending is disabled)

 Common arguments:
   -v, --verbose         Verbose mode (-vv for more, -vvv to get all messages).
-  --logfile FILE        Log to the file specified. Special values are:
+  --logfile FILE, --log-file FILE
+                        Log to the file specified. Special values are:
                         'syslog', 'journald'. See the documentation for more
                         details.
   -V, --version         show program's version number and exit

@@ -157,7 +157,7 @@ Freqtrade currently supports the following data-formats:
 * `json` - plain "text" json files
 * `jsongz` - a gzip-zipped version of json files
 * `hdf5` - a high performance datastore
-* `feather` - a dataformat based on Apache Arrow (OHLCV only)
+* `feather` - a dataformat based on Apache Arrow
 * `parquet` - columnar datastore (OHLCV only)

 By default, OHLCV data is stored as `json` data, while trades data is stored as `jsongz` data.
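
As an aside: one practical upside of the `feather` format is that stored files read straight back into pandas. A minimal sketch (the path is a hypothetical example; column names follow freqtrade's usual OHLCV layout):

```python
import pandas as pd

# Hypothetical path - exchange, pair and timeframe depend on your setup.
df = pd.read_feather("user_data/data/binance/ETH_BTC-5m.feather")
print(df.head())  # typically: date, open, high, low, close, volume
```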

@@ -255,7 +255,7 @@ usage: freqtrade convert-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
                               [--trading-mode {spot,margin,futures}]
                               [--candle-types {spot,futures,mark,index,premiumIndex,funding_rate} [{spot,futures,mark,index,premiumIndex,funding_rate} ...]]

-optional arguments:
+options:
   -h, --help            show this help message and exit
   -p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
                         Limit command to these pairs. Pairs are space-

@@ -266,19 +266,20 @@ optional arguments:
                         Destination format for data conversion.
   --erase               Clean all existing data for the selected
                         exchange/pairs/timeframes.
-  --exchange EXCHANGE   Exchange name (default: `bittrex`). Only valid if no
-                        config is provided.
+  --exchange EXCHANGE   Exchange name. Only valid if no config is provided.
   -t TIMEFRAMES [TIMEFRAMES ...], --timeframes TIMEFRAMES [TIMEFRAMES ...]
                         Specify which tickers to download. Space-separated
                         list. Default: `1m 5m`.
   --trading-mode {spot,margin,futures}, --tradingmode {spot,margin,futures}
                         Select Trading mode
   --candle-types {spot,futures,mark,index,premiumIndex,funding_rate} [{spot,futures,mark,index,premiumIndex,funding_rate} ...]
-                        Select candle type to use
+                        Select candle type to convert. Defaults to all
+                        available types.

 Common arguments:
   -v, --verbose         Verbose mode (-vv for more, -vvv to get all messages).
-  --logfile FILE        Log to the file specified. Special values are:
+  --logfile FILE, --log-file FILE
+                        Log to the file specified. Special values are:
                         'syslog', 'journald'. See the documentation for more
                         details.
   -V, --version         show program's version number and exit

@@ -291,7 +292,6 @@ Common arguments:
                         Path to directory with historical backtesting data.
   --userdir PATH, --user-data-dir PATH
                         Path to userdata directory.

 ```

 ### Example converting data

@@ -314,7 +314,7 @@ usage: freqtrade convert-trade-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
                                     {json,jsongz,hdf5,feather,parquet}
                                     [--erase] [--exchange EXCHANGE]

-optional arguments:
+options:
   -h, --help            show this help message and exit
   -p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
                         Limit command to these pairs. Pairs are space-

@@ -325,12 +325,12 @@ optional arguments:
                         Destination format for data conversion.
   --erase               Clean all existing data for the selected
                         exchange/pairs/timeframes.
-  --exchange EXCHANGE   Exchange name (default: `bittrex`). Only valid if no
-                        config is provided.
+  --exchange EXCHANGE   Exchange name. Only valid if no config is provided.

 Common arguments:
   -v, --verbose         Verbose mode (-vv for more, -vvv to get all messages).
-  --logfile FILE        Log to the file specified. Special values are:
+  --logfile FILE, --log-file FILE
+                        Log to the file specified. Special values are:
                         'syslog', 'journald'. See the documentation for more
                         details.
   -V, --version         show program's version number and exit

@@ -367,9 +367,9 @@ usage: freqtrade trades-to-ohlcv [-h] [-v] [--logfile FILE] [-V] [-c PATH]
                                  [-t TIMEFRAMES [TIMEFRAMES ...]]
                                  [--exchange EXCHANGE]
                                  [--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
-                                 [--data-format-trades {json,jsongz,hdf5}]
+                                 [--data-format-trades {json,jsongz,hdf5,feather}]

-optional arguments:
+options:
   -h, --help            show this help message and exit
   -p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
                         Limit command to these pairs. Pairs are space-

@@ -377,18 +377,18 @@ optional arguments:
   -t TIMEFRAMES [TIMEFRAMES ...], --timeframes TIMEFRAMES [TIMEFRAMES ...]
                         Specify which tickers to download. Space-separated
                         list. Default: `1m 5m`.
-  --exchange EXCHANGE   Exchange name (default: `bittrex`). Only valid if no
-                        config is provided.
+  --exchange EXCHANGE   Exchange name. Only valid if no config is provided.
   --data-format-ohlcv {json,jsongz,hdf5,feather,parquet}
                         Storage format for downloaded candle (OHLCV) data.
-                        (default: `json`).
-  --data-format-trades {json,jsongz,hdf5}
+                        (default: `feather`).
+  --data-format-trades {json,jsongz,hdf5,feather}
                         Storage format for downloaded trades data. (default:
-                        `jsongz`).
+                        `feather`).

 Common arguments:
   -v, --verbose         Verbose mode (-vv for more, -vvv to get all messages).
-  --logfile FILE        Log to the file specified. Special values are:
+  --logfile FILE, --log-file FILE
+                        Log to the file specified. Special values are:
                         'syslog', 'journald'. See the documentation for more
                         details.
   -V, --version         show program's version number and exit

@@ -422,13 +422,12 @@ usage: freqtrade list-data [-h] [-v] [--logfile FILE] [-V] [-c PATH] [-d PATH]
                            [--trading-mode {spot,margin,futures}]
                            [--show-timerange]

-optional arguments:
+options:
   -h, --help            show this help message and exit
-  --exchange EXCHANGE   Exchange name (default: `bittrex`). Only valid if no
-                        config is provided.
+  --exchange EXCHANGE   Exchange name. Only valid if no config is provided.
   --data-format-ohlcv {json,jsongz,hdf5,feather,parquet}
                         Storage format for downloaded candle (OHLCV) data.
-                        (default: `json`).
+                        (default: `feather`).
   -p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
                         Limit command to these pairs. Pairs are space-
                         separated.

@@ -439,7 +438,8 @@ optional arguments:

 Common arguments:
   -v, --verbose         Verbose mode (-vv for more, -vvv to get all messages).
-  --logfile FILE        Log to the file specified. Special values are:
+  --logfile FILE, --log-file FILE
+                        Log to the file specified. Special values are:
                         'syslog', 'journald'. See the documentation for more
                         details.
   -V, --version         show program's version number and exit

@@ -474,7 +474,7 @@ ETH/USDT 5m, 15m, 30m, 1h, 2h, 4h
 By default, `download-data` sub-command downloads Candles (OHLCV) data. Some exchanges also provide historic trade-data via their API.
 This data can be useful if you need many different timeframes, since it is only downloaded once, and then resampled locally to the desired timeframes.

-Since this data is large by default, the files use gzip by default. They are stored in your data-directory with the naming convention of `<pair>-trades.json.gz` (`ETH_BTC-trades.json.gz`). Incremental mode is also supported, as for historic OHLCV data, so downloading the data once per week with `--days 8` will create an incremental data-repository.
+Since this data is large by default, the files use the feather file format by default. They are stored in your data-directory with the naming convention of `<pair>-trades.feather` (`ETH_BTC-trades.feather`). Incremental mode is also supported, as for historic OHLCV data, so downloading the data once per week with `--days 8` will create an incremental data-repository.

 To use this mode, simply add `--dl-trades` to your call. This will swap the download method to download trades, and resamples the data locally.

@@ -453,7 +453,13 @@ Once the PR against stable is merged (best right after merging):
 * Use the button "Draft a new release" in the Github UI (subsection releases).
 * Use the version-number specified as tag.
 * Use "stable" as reference (this step comes after the above PR is merged).
-* Use the above changelog as release comment (as codeblock)
+* Use the above changelog as release comment (as codeblock).
+* Use the below snippet for the new release
+
+??? Tip "Release template"
+    ````
+    --8<-- "includes/release_template.md"
+    ````

 ## Releases

@@ -259,10 +259,17 @@ The configuration parameter `exchange.unknown_fee_rate` can be used to specify t
 Futures trading on bybit is currently supported for USDT markets, and will use isolated futures mode.
 Users with unified accounts (there's no way back) can create a Sub-account which will start as "non-unified", and can therefore use isolated futures.
-On startup, freqtrade will set the position mode to "One-way Mode" for the whole (sub)account. This avoids making this call over and over again (slowing down bot operations), but means that changes to this setting may result in exceptions and errors.
+On startup, freqtrade will set the position mode to "One-way Mode" for the whole (sub)account. This avoids making this call over and over again (slowing down bot operations), but means that changes to this setting may result in exceptions and errors
+
+As bybit doesn't provide funding rate history, the dry-run calculation is used for live trades as well.
+
+API Keys for live futures trading (Subaccount on non-unified) must have the following permissions:
+* Read-write
+* Contract - Orders
+* Contract - Positions
+
+We strongly recommend limiting all API keys to the IP you're going to use them from.

 !!! Tip "Stoploss on Exchange"
     Bybit (futures only) supports `stoploss_on_exchange` and uses `stop-loss-limit` orders. It provides great advantages, so we recommend benefiting from it by enabling stoploss on exchange.
     On futures, Bybit supports both `stop-limit` as well as `stop-market` orders. You can use either `"limit"` or `"market"` in the `order_types.stoploss` configuration setting to decide which type to use.

@@ -160,7 +160,7 @@ Below are the values you can expect to include/use inside a typical strategy dataframe:
 |------------|-------------|
 | `df['&*']` | Any dataframe column prepended with `&` in `set_freqai_targets()` is treated as a training target (label) inside FreqAI (typically following the naming convention `&-s*`). For example, to predict the close price 40 candles into the future, you would set `df['&-s_close'] = df['close'].shift(-self.freqai_info["feature_parameters"]["label_period_candles"])` with `"label_period_candles": 40` in the config. FreqAI makes the predictions and gives them back under the same key (`df['&-s_close']`) to be used in `populate_entry/exit_trend()`. <br> **Datatype:** Depends on the output of the model.
 | `df['&*_std/mean']` | Standard deviation and mean values of the defined labels during training (or live tracking with `fit_live_predictions_candles`). Commonly used to understand the rarity of a prediction (use the z-score as shown in `templates/FreqaiExampleStrategy.py` and explained [here](#creating-a-dynamic-target-threshold) to evaluate how often a particular prediction was observed during training or historically with `fit_live_predictions_candles`). <br> **Datatype:** Float.
-| `df['do_predict']` | Indication of an outlier data point. The return value is integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers()` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM considers the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`. <br> **Datatype:** Integer between -2 and 2.
+| `df['do_predict']` | Indication of an outlier data point. The return value is integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM considers the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`. <br> **Datatype:** Integer between -2 and 2.
 | `df['DI_values']` | Dissimilarity Index (DI) values are proxies for the level of confidence FreqAI has in the prediction. A lower DI means the prediction is close to the training data, i.e., higher prediction confidence. See details about the DI [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Float.
 | `df['%*']` | Any dataframe column prepended with `%` in `feature_engineering_*()` is treated as a training feature. For example, you can include the RSI in the training feature set (similar to in `templates/FreqaiExampleStrategy.py`) by setting `df['%-rsi']`. See more details on how this is done [here](freqai-feature-engineering.md). <br> **Note:** Since the number of features prepended with `%` can multiply very quickly (10s of thousands of features are easily engineered using the multiplicative functionality of, e.g., `include_shifted_candles` and `include_timeframes` as described in the [parameter table](freqai-parameter-table.md)), these features are removed from the dataframe that is returned from FreqAI to the strategy. To keep a particular type of feature for plotting purposes, you would prepend it with `%%`. <br> **Datatype:** Depends on the output of the model.
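
A hedged sketch of how these columns are commonly consumed (an illustrative helper, not part of this diff; it mirrors what a strategy's `populate_entry_trend()` typically does with the columns from the table above):

```python
from pandas import DataFrame

def gate_entries(df: DataFrame) -> DataFrame:
    """Illustrative: only enter when FreqAI trusts its own prediction."""
    df.loc[
        (df["do_predict"] == 1)             # prediction passed outlier checks
        & (df["&-s_close"] > df["close"]),  # model expects the close to rise
        "enter_long",
    ] = 1
    return df
```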

@@ -261,7 +261,7 @@ class MyFreqaiModel(BaseRegressionModel):
         """
         feature_pipeline = Pipeline([
             ('qt', SKLearnWrapper(QuantileTransformer(output_distribution='normal'))),
-            ('di', ds.DissimilarityIndex(di_threshold=1)
+            ('di', ds.DissimilarityIndex(di_threshold=1))
         ])

         return feature_pipeline

@@ -42,7 +42,6 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
 | `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training dataset, as well as from incoming data points. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Boolean.
 | `svm_params` | All parameters available in Sklearn's `SGDOneClassSVM()`. See details about some select parameters [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Dictionary.
 | `use_DBSCAN_to_remove_outliers` | Cluster data using the DBSCAN algorithm to identify and remove outliers from training and prediction data. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan). <br> **Datatype:** Boolean.
 | `inlier_metric_window` | If set, FreqAI adds an `inlier_metric` to the training feature set and sets the lookback to be the `inlier_metric_window`, i.e., the number of previous time points to compare the current candle to. Details of how the `inlier_metric` is computed can be found [here](freqai-feature-engineering.md#inlier-metric). <br> **Datatype:** Integer. <br> Default: `0`.
 | `noise_standard_deviation` | If set, FreqAI adds noise to the training features with the aim of preventing overfitting. FreqAI generates random deviates from a gaussian distribution with a standard deviation of `noise_standard_deviation` and adds them to all data points. `noise_standard_deviation` should be kept relative to the normalized space, i.e., between -1 and 1. In other words, since data in FreqAI is always normalized to be between -1 and 1, `noise_standard_deviation: 0.05` would result in 32% of the data being randomly increased/decreased by more than 2.5% (i.e., the percent of data falling within the first standard deviation). <br> **Datatype:** Integer. <br> Default: `0`.
 | `outlier_protection_percentage` | Enable to prevent outlier detection methods from discarding too much data. If more than `outlier_protection_percentage` % of points are detected as outliers by the SVM or DBSCAN, FreqAI will log a warning message and ignore outlier detection, i.e., the original dataset will be kept intact. If the outlier protection is triggered, no predictions will be made based on the training dataset. <br> **Datatype:** Float. <br> Default: `30`.
 | `reverse_train_test_order` | Split the feature dataset (see below) and use the latest data split for training and test on the historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, you should be careful to understand the unorthodox nature of this parameter before employing it. <br> **Datatype:** Boolean. <br> Default: `False` (no reversal).
|
|||
`range` property may also be used with `DecimalParameter` and `CategoricalParameter`. `RealParameter` does not provide this property due to infinite search space.
|
||||
|
||||
??? Hint "Performance tip"
|
||||
During normal hyperopting, indicators are calculated once and supplied to each epoch, linearly increasing RAM usage as a factor of increasing cores. As this also has performance implications, hyperopt provides `--analyze-per-epoch` which will move the execution of `populate_indicators()` to the epoch process, calculating a single value per parameter per epoch instead of using the `.range` functionality. In this case, `.range` functionality will only return the actually used value. This will reduce RAM usage, but increase CPU usage. However, your hyperopting run will be less likely to fail due to Out Of Memory (OOM) issues.
|
||||
During normal hyperopting, indicators are calculated once and supplied to each epoch, linearly increasing RAM usage as a factor of increasing cores. As this also has performance implications, there are two alternatives to reduce RAM usage
|
||||
|
||||
In either case, you should try to use space ranges as small as possible this will improve CPU/RAM usage in both scenarios.
|
||||
* Move `ema_short` and `ema_long` calculations from `populate_indicators()` to `populate_entry_trend()`. Since `populate_entry_trend()` gonna be calculated every epochs, you don't need to use `.range` functionality.
|
||||
* hyperopt provides `--analyze-per-epoch` which will move the execution of `populate_indicators()` to the epoch process, calculating a single value per parameter per epoch instead of using the `.range` functionality. In this case, `.range` functionality will only return the actually used value.
|
||||
|
||||
These alternatives will reduce RAM usage, but increase CPU usage. However, your hyperopting run will be less likely to fail due to Out Of Memory (OOM) issues.
|
||||
|
||||
Whether you are using `.range` functionality or the alternatives above, you should try to use space ranges as small as possible since this will improve CPU/RAM usage.
|
||||
|
||||
|
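
A minimal sketch of the first alternative, assuming the `buy_ema_short`/`buy_ema_long` parameters from the surrounding hyperopt example (illustrative, not part of the diff):

```python
import talib.abstract as ta
import freqtrade.vendor.qtpylib.indicators as qtpylib
from pandas import DataFrame

# Method of the IStrategy subclass being hyperopted: the EMAs are computed
# here, per epoch, instead of pre-computing every `.range` value
# in populate_indicators().
def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
    ema_short = ta.EMA(dataframe, timeperiod=int(self.buy_ema_short.value))
    ema_long = ta.EMA(dataframe, timeperiod=int(self.buy_ema_long.value))
    dataframe.loc[
        qtpylib.crossed_above(ema_short, ema_long), "enter_long"] = 1
    return dataframe
```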

 ## Optimizing protections

@@ -184,6 +184,8 @@ The RemotePairList is defined in the pairlists section of the configuration settings.
 "pairlists": [
     {
         "method": "RemotePairList",
+        "mode": "whitelist",
+        "processing_mode": "filter",
         "pairlist_url": "https://example.com/pairlist",
         "number_assets": 10,
         "refresh_period": 1800,

@@ -194,6 +196,14 @@ The RemotePairList is defined in the pairlists section of the configuration settings.
 ]
 ```

+The optional `mode` option specifies if the pairlist should be used as a `blacklist` or as a `whitelist`. The default value is "whitelist".
+
+The optional `processing_mode` option in the RemotePairList configuration determines how the retrieved pairlist is processed. It can have two values: "filter" or "append".
+
+In "filter" mode, the retrieved pairlist is used as a filter. Only the pairs present in both the original pairlist and the retrieved pairlist are included in the final pairlist. Other pairs are filtered out.
+
+In "append" mode, the retrieved pairlist is added to the original pairlist. All pairs from both lists are included in the final pairlist without any filtering.
+
 The `pairlist_url` option specifies the URL of the remote server where the pairlist is located, or the path to a local file (if file:/// is prepended). This allows the user to use either a remote server or a local file as the source for the pairlist.

 The user is responsible for providing a server or local file that returns a JSON object with the following structure:

@@ -201,7 +211,7 @@ The user is responsible for providing a server or local file that returns a JSON object with the following structure:
 ```json
 {
     "pairs": ["XRP/USDT", "ETH/USDT", "LTC/USDT"],
-    "refresh_period": 1800,
+    "refresh_period": 1800
 }
 ```
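
A quick sketch of producing such a file locally for use via the `file:///` form of `pairlist_url` (the filename is hypothetical):

```python
import json

# Hypothetical local pairlist, consumable via
#   "pairlist_url": "file:///path/to/pairlist.json"
payload = {"pairs": ["XRP/USDT", "ETH/USDT", "LTC/USDT"], "refresh_period": 1800}
with open("pairlist.json", "w") as f:
    json.dump(payload, f, indent=4)
```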

docs/includes/release_template.md (new file, 37 lines)

@@ -0,0 +1,37 @@
+## Highlighted changes
+
+- ...
+
+### How to update
+
+As always, you can update your bot using one of the following commands:
+
+#### docker-compose
+
+```bash
+docker-compose pull
+docker-compose up -d
+```
+
+#### Installation via setup script
+
+```
+# Deactivate venv and run
+./setup.sh --update
+```
+
+#### Plain native installation
+
+```
+git pull
+pip install -U -r requirements.txt
+```
+
+<details>
+<summary>Expand full changelog</summary>
+
+```
+<Paste your changelog here>
+```
+
+</details>

docs/includes/showcase.md (new file, 11 lines)

@@ -0,0 +1,11 @@
+This section will highlight a few projects from members of the community.
+!!! Note
+    The projects below are for the most part not maintained by the freqtrade team, therefore use your own caution before using them.
+
+- [Example freqtrade strategies](https://github.com/freqtrade/freqtrade-strategies/)
+- [FrequentHippo - Grafana dashboard with dry/live runs and backtests](http://frequenthippo.ddns.net:3000/) (by hippocritical).
+- [Online pairlist generator](https://remotepairlist.com/) (by Blood4rc).
+- [Freqtrade Backtesting Project](https://bt.robot.co.network/) (by Blood4rc).
+- [Freqtrade analysis notebook](https://github.com/froggleston/freqtrade_analysis_notebook) (by Froggleston).
+- [TUI for freqtrade](https://github.com/froggleston/freqtrade-frogtrade9000) (by Froggleston).
+- [Bot Academy](https://botacademy.ddns.net/) (by stash86) - Blog about crypto bot projects.

@@ -63,6 +63,10 @@ Exchanges confirmed working by the community:
 - [X] [Bitvavo](https://bitvavo.com/)
 - [X] [Kucoin](https://www.kucoin.com/)

+## Community showcase
+
+--8<-- "includes/showcase.md"
+
 ## Requirements

 ### Hardware requirements

@@ -1,6 +1,6 @@
-markdown==3.3.7
-mkdocs==1.4.3
-mkdocs-material==9.1.16
+markdown==3.4.4
+mkdocs==1.5.2
+mkdocs-material==9.1.21
 mdx_truly_sane_lists==1.3
-pymdown-extensions==10.0.1
+pymdown-extensions==10.1
 jinja2==3.1.2

@@ -750,7 +750,7 @@ class DigDeeperStrategy(IStrategy):
         # Hope you have a deep wallet!
         try:
             # This returns first order stake size
-            stake_amount = filled_entries[0].cost
+            stake_amount = filled_entries[0].stake_amount
             # This then calculates current safety order size
             stake_amount = stake_amount * (1 + (count_of_entries * 0.25))
             return stake_amount
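
Why this switch from `.cost` to `.stake_amount` matters, as a back-of-the-envelope with hypothetical numbers (compare the `Order` table change further down, where `cost` is noted as exchange-dependent on futures):

```python
# Hypothetical 5x leveraged futures entry (illustrative numbers only):
stake_amount = 100.0   # margin actually committed by the bot
cost = 500.0           # amount * price; on futures this may include leverage

# Scaling a safety order from `cost` would size the DCA entry 5x too large;
# scaling from `stake_amount` keeps it proportional to committed capital.
safety_order = stake_amount * (1 + (1 * 0.25))
assert safety_order == 125.0   # from `cost` it would have been 625.0
```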

@@ -800,8 +800,8 @@ class MyCoolFreqaiModel(BaseRegressionModel):
         if self.freqai_info.get("DI_threshold", 0) > 0:
             dk.DI_values = dk.feature_pipeline["di"].di_values
         else:
-            dk.DI_values = np.zeros(len(outliers.index))
-        dk.do_predict = outliers.to_numpy()
+            dk.DI_values = np.zeros(outliers.shape[0])
+        dk.do_predict = outliers

         # ... your custom code
         return (pred_df, dk.do_predict)

@@ -287,12 +287,17 @@ Return a summary of your profit/loss and performance.
 > **Best Performing:** `PAY/BTC: 50.23%`
 > **Trading volume:** `0.5 BTC`
 > **Profit factor:** `1.04`
+> **Win / Loss:** `102 / 36`
+> **Winrate:** `73.91%`
+> **Expectancy (Ratio):** `4.87 (1.66)`
 > **Max Drawdown:** `9.23% (0.01255 BTC)`

 The relative profit of `1.2%` is the average profit per trade.
 The relative profit of `15.2 Σ%` is based on the starting capital - so in this case, the starting capital was `0.00485701 * 1.152 = 0.00738 BTC`.
 Starting capital is either taken from the `available_capital` setting, or calculated by using current wallet size - profits.
 Profit Factor is calculated as gross profits / gross losses - and should serve as an overall metric for the strategy.
+Expectancy corresponds to the average return per currency unit at risk, i.e. the winrate and the risk-reward ratio (the average gain of winning trades compared to the average loss of losing trades).
+Expectancy Ratio is the expected profit or loss of a subsequent trade based on the performance of all past trades.
 Max drawdown corresponds to the backtesting metric `Absolute Drawdown (Account)` - calculated as `(Absolute Drawdown) / (DrawdownHigh + startingBalance)`.
 Bot started date will refer to the date the bot was first started. For older bots, this will default to the first trade's open date.
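
A small self-contained sketch of how these metrics relate (the formulas follow the descriptions above; the sample profits are made up):

```python
profits = [0.04, -0.02, 0.03, -0.01, 0.05]  # hypothetical per-trade profit ratios

wins = [p for p in profits if p > 0]
losses = [p for p in profits if p < 0]

profit_factor = sum(wins) / abs(sum(losses))   # gross profits / gross losses
winrate = len(wins) / len(profits)
avg_win = sum(wins) / len(wins)
avg_loss = abs(sum(losses)) / len(losses)

# Average return per unit at risk, combining winrate and risk-reward:
expectancy = winrate * avg_win - (1 - winrate) * avg_loss
expectancy_ratio = winrate * (avg_win / avg_loss) - (1 - winrate)

print(f"{profit_factor=:.2f} {expectancy=:.4f} {expectancy_ratio=:.2f}")
```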

@@ -141,7 +141,8 @@ Most properties here can be None as they are dependent on the exchange response.
 `amount` | float | Amount in base currency
 `filled` | float | Filled amount (in base currency)
 `remaining` | float | Remaining amount
-`cost` | float | Cost of the order - usually average * filled
+`cost` | float | Cost of the order - usually average * filled (*Exchange dependent on futures; may contain the cost with or without leverage and may be in contracts.*)
+`stake_amount` | float | Stake amount used for this order. *Added in 2023.7.*
 `order_date` | datetime | Order creation date **use `order_date_utc` instead**
 `order_date_utc` | datetime | Order creation date (in UTC)
 `order_fill_date` | datetime | Order fill date **use `order_fill_utc` instead**

@@ -80,12 +80,18 @@ When using the Form-Encoded or JSON-Encoded configuration you can configure any

 The result would be a POST request with e.g. `Status: running` body and `Content-Type: text/plain` header.

-Optional parameters are available to enable automatic retries for webhook messages. The `webhook.retries` parameter can be set for the maximum number of retries the webhook request should attempt if it is unsuccessful (i.e. HTTP response status is not 200). By default this is set to `0` which is disabled. An additional `webhook.retry_delay` parameter can be set to specify the time in seconds between retry attempts. By default this is set to `0.1` (i.e. 100ms). Note that increasing the number of retries or retry delay may slow down the trader if there are connectivity issues with the webhook. Example configuration for retries:
+## Additional configurations
+
+The `webhook.retries` parameter can be set for the maximum number of retries the webhook request should attempt if it is unsuccessful (i.e. HTTP response status is not 200). By default this is set to `0` which is disabled. An additional `webhook.retry_delay` parameter can be set to specify the time in seconds between retry attempts. By default this is set to `0.1` (i.e. 100ms). Note that increasing the number of retries or retry delay may slow down the trader if there are connectivity issues with the webhook.
+You can also specify `webhook.timeout` - which defines how long the bot will wait until it assumes the other host is unresponsive (defaults to 10s).
+
+Example configuration for retries:

 ```json
 "webhook": {
     "enabled": true,
     "url": "https://<YOURHOOKURL>",
+    "timeout": 10,
     "retries": 3,
     "retry_delay": 0.2,
     "status": {
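
The retry semantics described above amount to roughly the following (a sketch, not freqtrade's actual implementation):

```python
import time
import requests

def post_with_retries(url: str, data: dict, retries: int = 3,
                      retry_delay: float = 0.2, timeout: float = 10.0):
    """Sketch of the webhook retry behaviour: retry while status != 200."""
    for attempt in range(retries + 1):
        try:
            resp = requests.post(url, data=data, timeout=timeout)
            if resp.status_code == 200:
                return resp
        except requests.RequestException:
            pass  # a connection error counts as a failed attempt
        if attempt < retries:
            time.sleep(retry_delay)
    return None
```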

@@ -109,6 +115,8 @@ Custom messages can be sent to Webhook endpoints via the `self.dp.send_msg()` function

 Different payloads can be configured for different events. Not all fields are necessary, but you should configure at least one of the dicts, otherwise the webhook will never be called.

+## Webhook Message types
+
 ### Entry

 The fields in `webhook.entry` are filled when the bot executes a long/short. Parameters are filled using string.format.

@@ -1,5 +1,5 @@
 """ Freqtrade bot """
-__version__ = '2023.6.dev'
+__version__ = '2023.8.dev'

 if 'dev' in __version__:
     from pathlib import Path
@@ -67,8 +67,7 @@ ARGS_BUILD_STRATEGY = ["user_data_dir", "strategy", "template"]

 ARGS_CONVERT_DATA = ["pairs", "format_from", "format_to", "erase", "exchange"]

-ARGS_CONVERT_DATA_OHLCV = ARGS_CONVERT_DATA + ["timeframes", "trading_mode",
-                                               "candle_types"]
+ARGS_CONVERT_DATA_OHLCV = ARGS_CONVERT_DATA + ["timeframes", "trading_mode", "candle_types"]

 ARGS_CONVERT_TRADES = ["pairs", "timeframes", "exchange", "dataformat_ohlcv", "dataformat_trades"]

@@ -5,6 +5,7 @@ from typing import Any, Dict, List

 from questionary import Separator, prompt

+from freqtrade.configuration.detect_environment import running_in_docker
 from freqtrade.configuration.directory_operations import chown_user_directory
 from freqtrade.constants import UNLIMITED_STAKE_AMOUNT
 from freqtrade.exceptions import OperationalException

@@ -179,7 +180,7 @@ def ask_user_config() -> Dict[str, Any]:
             "name": "api_server_listen_addr",
             "message": ("Insert Api server Listen Address (0.0.0.0 for docker, "
                         "otherwise best left untouched)"),
-            "default": "127.0.0.1",
+            "default": "127.0.0.1" if not running_in_docker() else "0.0.0.0",
             "when": lambda x: x['api_server']
         },
         {

@@ -381,7 +381,7 @@ AVAILABLE_CLI_OPTIONS = {
     ),
     "candle_types": Arg(
         '--candle-types',
-        help='Select candle type to use',
+        help='Select candle type to convert. Defaults to all available types.',
         choices=[c.value for c in CandleType],
         nargs='+',
     ),

@@ -435,12 +435,12 @@ AVAILABLE_CLI_OPTIONS = {
     ),
     "dataformat_ohlcv": Arg(
         '--data-format-ohlcv',
-        help='Storage format for downloaded candle (OHLCV) data. (default: `json`).',
+        help='Storage format for downloaded candle (OHLCV) data. (default: `feather`).',
         choices=constants.AVAILABLE_DATAHANDLERS,
     ),
     "dataformat_trades": Arg(
         '--data-format-trades',
-        help='Storage format for downloaded trades data. (default: `jsongz`).',
+        help='Storage format for downloaded trades data. (default: `feather`).',
         choices=constants.AVAILABLE_DATAHANDLERS_TRADES,
     ),
     "show_timerange": Arg(

@@ -450,14 +450,12 @@ AVAILABLE_CLI_OPTIONS = {
     ),
     "exchange": Arg(
         '--exchange',
-        help=f'Exchange name (default: `{constants.DEFAULT_EXCHANGE}`). '
-        f'Only valid if no config is provided.',
+        help='Exchange name. Only valid if no config is provided.',
     ),
     "timeframes": Arg(
         '-t', '--timeframes',
         help='Specify which tickers to download. Space-separated list. '
         'Default: `1m 5m`.',
         default=['1m', '5m'],
         nargs='+',
     ),
     "prepend_data": Arg(

@@ -4,10 +4,10 @@ from collections import defaultdict
 from typing import Any, Dict

 from freqtrade.configuration import TimeRange, setup_utils_configuration
-from freqtrade.constants import DATETIME_PRINT_FORMAT, Config
+from freqtrade.constants import DATETIME_PRINT_FORMAT, DL_DATA_TIMEFRAMES, Config
 from freqtrade.data.converter import convert_ohlcv_format, convert_trades_format
 from freqtrade.data.history import convert_trades_to_ohlcv, download_data_main
-from freqtrade.enums import CandleType, RunMode, TradingMode
+from freqtrade.enums import RunMode, TradingMode
 from freqtrade.exceptions import OperationalException
 from freqtrade.exchange import timeframe_to_minutes
 from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist

@@ -57,6 +57,8 @@ def start_convert_trades(args: Dict[str, Any]) -> None:
         raise OperationalException(
             "Downloading data requires a list of pairs. "
             "Please check the documentation on how to configure this.")
+    if 'timeframes' not in config:
+        config['timeframes'] = DL_DATA_TIMEFRAMES

     # Init exchange
     exchange = ExchangeResolver.load_exchange(config, validate=False)

@@ -86,11 +88,10 @@ def start_convert_data(args: Dict[str, Any], ohlcv: bool = True) -> None:
     config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
     if ohlcv:
         migrate_binance_futures_data(config)
-        candle_types = [CandleType.from_string(ct) for ct in config.get('candle_types', ['spot'])]
-        for candle_type in candle_types:
-            convert_ohlcv_format(config,
-                                 convert_from=args['format_from'], convert_to=args['format_to'],
-                                 erase=args['erase'], candle_type=candle_type)
+        convert_ohlcv_format(config,
+                             convert_from=args['format_from'],
+                             convert_to=args['format_to'],
+                             erase=args['erase'])
     else:
         convert_trades_format(config,
                               convert_from=args['format_from'], convert_to=args['format_to'],

@@ -33,11 +33,11 @@ def start_list_exchanges(args: Dict[str, Any]) -> None:
     else:
         headers = {
             'name': 'Exchange name',
-            'valid': 'Valid',
             'supported': 'Supported',
             'trade_modes': 'Markets',
             'comment': 'Reason',
         }
+        headers.update({'valid': 'Valid'} if args['list_exchanges_all'] else {})

         def build_entry(exchange: ValidExchangesType, valid: bool):
             valid_entry = {'valid': exchange['valid']} if valid else {}

@@ -3,4 +3,5 @@
 from freqtrade.configuration.config_setup import setup_utils_configuration
 from freqtrade.configuration.config_validation import validate_config_consistency
 from freqtrade.configuration.configuration import Configuration
+from freqtrade.configuration.detect_environment import running_in_docker
 from freqtrade.configuration.timerange import TimeRange

freqtrade/configuration/detect_environment.py (new file, 8 lines)

@@ -0,0 +1,8 @@
+import os
+
+
+def running_in_docker() -> bool:
+    """
+    Check if we are running in a docker container
+    """
+    return os.environ.get('FT_APP_ENV') == 'docker'
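
Usage is a plain boolean check; for example (a hypothetical call site mirroring the `ask_user_config` change above):

```python
from freqtrade.configuration.detect_environment import running_in_docker

# Pick a sensible API listen address depending on the environment.
listen_addr = "0.0.0.0" if running_in_docker() else "127.0.0.1"
```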

@@ -3,6 +3,7 @@ import shutil
 from pathlib import Path
 from typing import Optional

+from freqtrade.configuration.detect_environment import running_in_docker
 from freqtrade.constants import (USER_DATA_FILES, USERPATH_FREQAIMODELS, USERPATH_HYPEROPTS,
                                  USERPATH_NOTEBOOKS, USERPATH_STRATEGIES, Config)
 from freqtrade.exceptions import OperationalException

@@ -30,8 +31,7 @@ def chown_user_directory(directory: Path) -> None:
     Use Sudo to change permissions of the home-directory if necessary
     Only applies when running in docker!
     """
-    import os
-    if os.environ.get('FT_APP_ENV') == 'docker':
+    if running_in_docker():
         try:
             import subprocess
             subprocess.check_output(

@@ -6,6 +6,8 @@ import re
 from datetime import datetime, timezone
 from typing import Optional

+from typing_extensions import Self
+
 from freqtrade.constants import DATETIME_PRINT_FORMAT
 from freqtrade.exceptions import OperationalException

@@ -107,15 +109,15 @@ class TimeRange:
         self.startts = int(min_date.timestamp() + timeframe_secs * startup_candles)
         self.starttype = 'date'

-    @staticmethod
-    def parse_timerange(text: Optional[str]) -> 'TimeRange':
+    @classmethod
+    def parse_timerange(cls, text: Optional[str]) -> Self:
         """
         Parse the value of the argument --timerange to determine what is the range desired
         :param text: value from --timerange
         :return: Start and End range period
         """
         if not text:
-            return TimeRange(None, None, 0, 0)
+            return cls(None, None, 0, 0)
         syntax = [(r'^-(\d{8})$', (None, 'date')),
                   (r'^(\d{8})-$', ('date', None)),
                   (r'^(\d{8})-(\d{8})$', ('date', 'date')),

@@ -156,5 +158,5 @@
             if start > stop > 0:
                 raise OperationalException(
                     f'Start date is after stop date for timerange "{text}"')
-            return TimeRange(stype[0], stype[1], start, stop)
+            return cls(stype[0], stype[1], start, stop)
         raise OperationalException(f'Incorrect syntax for timerange "{text}"')
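
The `@staticmethod` to `@classmethod` switch with a `Self` return type means subclasses calling `parse_timerange()` get back their own type. The general pattern, as a minimal sketch with hypothetical names:

```python
from typing_extensions import Self

class Base:
    @classmethod
    def parse(cls, text: str) -> Self:
        return cls()  # constructs the *calling* class, not Base

class Derived(Base):
    pass

assert isinstance(Derived.parse("20230101-"), Derived)
```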

@@ -10,7 +10,6 @@ from freqtrade.enums import CandleType, PriceType, RPCMessageType

 DOCS_LINK = "https://www.freqtrade.io/en/stable"
 DEFAULT_CONFIG = 'config.json'
-DEFAULT_EXCHANGE = 'bittrex'
 PROCESS_THROTTLE_SECS = 5  # sec
 HYPEROPT_EPOCH = 100  # epochs
 RETRY_TIMEOUT = 30  # sec

@@ -66,6 +65,7 @@ TELEGRAM_SETTING_OPTIONS = ['on', 'off', 'silent']
 WEBHOOK_FORMAT_OPTIONS = ['form', 'json', 'raw']
 FULL_DATAFRAME_THRESHOLD = 100
 CUSTOM_TAG_MAX_LENGTH = 255
+DL_DATA_TIMEFRAMES = ['1m', '5m']

 ENV_VAR_PREFIX = 'FREQTRADE__'

@@ -112,6 +112,8 @@ MINIMAL_CONFIG = {
     }
 }

+__MESSAGE_TYPE_DICT: Dict[str, Dict[str, str]] = {x: {'type': 'object'} for x in RPCMessageType}
+
 # Required json-schema for user specified config
 CONF_SCHEMA = {
     'type': 'object',

@@ -151,7 +153,7 @@ CONF_SCHEMA = {
             },
         },
         'amount_reserve_percent': {'type': 'number', 'minimum': 0.0, 'maximum': 0.5},
-        'stoploss': {'type': 'number', 'maximum': 0, 'exclusiveMaximum': True, 'minimum': -1},
+        'stoploss': {'type': 'number', 'maximum': 0, 'exclusiveMaximum': True},
         'trailing_stop': {'type': 'boolean'},
         'trailing_stop_positive': {'type': 'number', 'minimum': 0, 'maximum': 1},
         'trailing_stop_positive_offset': {'type': 'number', 'minimum': 0, 'maximum': 1},

@@ -354,7 +356,8 @@ CONF_SCHEMA = {
                 'format': {'type': 'string', 'enum': WEBHOOK_FORMAT_OPTIONS, 'default': 'form'},
                 'retries': {'type': 'integer', 'minimum': 0},
                 'retry_delay': {'type': 'number', 'minimum': 0},
-                **dict([(x, {'type': 'object'}) for x in RPCMessageType]),
+                **__MESSAGE_TYPE_DICT,
+                # **{x: {'type': 'object'} for x in RPCMessageType},
                 # Below -> Deprecated
                 'webhookentry': {'type': 'object'},
                 'webhookentrycancel': {'type': 'object'},

@@ -443,12 +446,12 @@ CONF_SCHEMA = {
         'dataformat_ohlcv': {
             'type': 'string',
             'enum': AVAILABLE_DATAHANDLERS,
-            'default': 'json'
+            'default': 'feather'
         },
         'dataformat_trades': {
             'type': 'string',
             'enum': AVAILABLE_DATAHANDLERS_TRADES,
-            'default': 'jsongz'
+            'default': 'feather'
         },
         'position_adjustment_enable': {'type': 'boolean'},
         'max_entry_position_adjustment': {'type': ['integer', 'number'], 'minimum': -1},

@@ -5,7 +5,7 @@ import logging
 from copy import copy
 from datetime import datetime, timezone
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Literal, Optional, Union

 import numpy as np
 import pandas as pd

@@ -15,6 +15,7 @@ from freqtrade.exceptions import OperationalException
 from freqtrade.misc import json_load
 from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename
 from freqtrade.persistence import LocalTrade, Trade, init_db
+from freqtrade.types import BacktestHistoryEntryType, BacktestResultType


 logger = logging.getLogger(__name__)
@ -128,7 +129,7 @@ def load_backtest_metadata(filename: Union[Path, str]) -> Dict[str, Any]:
|
|||
raise OperationalException('Unexpected error while loading backtest metadata.') from e
|
||||
|
||||
|
||||
def load_backtest_stats(filename: Union[Path, str]) -> Dict[str, Any]:
|
||||
def load_backtest_stats(filename: Union[Path, str]) -> BacktestResultType:
|
||||
"""
|
||||
Load backtest statistics file.
|
||||
:param filename: pathlib.Path object, or string pointing to the file.
|
||||
|
@ -147,21 +148,21 @@ def load_backtest_stats(filename: Union[Path, str]) -> Dict[str, Any]:
|
|||
# Legacy list format does not contain metadata.
|
||||
if isinstance(data, dict):
|
||||
data['metadata'] = load_backtest_metadata(filename)
|
||||
|
||||
return data
|
||||
|
||||
|
||||
def load_and_merge_backtest_result(strategy_name: str, filename: Path, results: Dict[str, Any]):
|
||||
"""
|
||||
Load one strategy from multi-strategy result
|
||||
and merge it with results
|
||||
Load one strategy from multi-strategy result and merge it with results
|
||||
:param strategy_name: Name of the strategy contained in the result
|
||||
:param filename: Backtest-result-filename to load
|
||||
:param results: dict to merge the result to.
|
||||
"""
|
||||
bt_data = load_backtest_stats(filename)
|
||||
for k in ('metadata', 'strategy'):
|
||||
k: Literal['metadata', 'strategy']
|
||||
for k in ('metadata', 'strategy'): # type: ignore
|
||||
results[k][strategy_name] = bt_data[k][strategy_name]
|
||||
results['metadata'][strategy_name]['filename'] = filename.stem
|
||||
comparison = bt_data['strategy_comparison']
|
||||
for i in range(len(comparison)):
|
||||
if comparison[i]['key'] == strategy_name:
|
||||
|
@ -170,27 +171,36 @@ def load_and_merge_backtest_result(strategy_name: str, filename: Path, results:
|
|||
|
||||
|
||||
def _get_backtest_files(dirname: Path) -> List[Path]:
|
||||
# Weird glob expression here avoids including .meta.json files.
|
||||
return list(reversed(sorted(dirname.glob('backtest-result-*-[0-9][0-9].json'))))
|
||||
|
||||
|
||||
def get_backtest_resultlist(dirname: Path):
|
||||
def get_backtest_resultlist(dirname: Path) -> List[BacktestHistoryEntryType]:
|
||||
"""
|
||||
Get list of backtest results read from metadata files
|
||||
"""
|
||||
results = []
|
||||
for filename in _get_backtest_files(dirname):
|
||||
metadata = load_backtest_metadata(filename)
|
||||
if not metadata:
|
||||
continue
|
||||
for s, v in metadata.items():
|
||||
results.append({
|
||||
'filename': filename.name,
|
||||
'strategy': s,
|
||||
'run_id': v['run_id'],
|
||||
'backtest_start_time': v['backtest_start_time'],
|
||||
return [
|
||||
{
|
||||
'filename': filename.stem,
|
||||
'strategy': s,
|
||||
'run_id': v['run_id'],
|
||||
'backtest_start_time': v['backtest_start_time'],
|
||||
}
|
||||
for filename in _get_backtest_files(dirname)
|
||||
for s, v in load_backtest_metadata(filename).items()
|
||||
if v
|
||||
]
|
||||
|
||||
})
|
||||
return results
|
||||
|
||||
def delete_backtest_result(file_abs: Path):
|
||||
"""
|
||||
Delete backtest result file and corresponding metadata file.
|
||||
"""
|
||||
# *.meta.json
|
||||
logger.info(f"Deleting backtest result file: {file_abs.name}")
|
||||
file_abs_meta = file_abs.with_suffix('.meta.json')
|
||||
file_abs.unlink()
|
||||
file_abs_meta.unlink()
|
||||
|
||||
|
||||
def find_existing_backtest_stats(dirname: Union[Path, str], run_ids: Dict[str, str],
|
||||
|
@ -211,7 +221,6 @@ def find_existing_backtest_stats(dirname: Union[Path, str], run_ids: Dict[str, s
|
|||
'strategy_comparison': [],
|
||||
}
|
||||
|
||||
# Weird glob expression here avoids including .meta.json files.
|
||||
for filename in _get_backtest_files(dirname):
|
||||
metadata = load_backtest_metadata(filename)
|
||||
if not metadata:
|
||||
|
|
|
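For reference, the rewritten `get_backtest_resultlist()` now yields one flat dict per strategy per result file, with `'filename'` holding the stem (no `.json` suffix). Illustrative shape, with invented values:

```python
# Hypothetical entry; the field names come from the comprehension above.
[
    {
        'filename': 'backtest-result-2023-07-30_12-00-00',
        'strategy': 'SampleStrategy',
        'run_id': 'deadbeefcafe',
        'backtest_start_time': 1690718400,
    },
]
```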
@@ -11,7 +11,7 @@ import pandas as pd
from pandas import DataFrame, to_datetime

from freqtrade.constants import DEFAULT_DATAFRAME_COLUMNS, DEFAULT_TRADES_COLUMNS, Config, TradeList
-from freqtrade.enums import CandleType
+from freqtrade.enums import CandleType, TradingMode


logger = logging.getLogger(__name__)

@@ -96,8 +96,14 @@ def ohlcv_fill_up_missing_data(dataframe: DataFrame, timeframe: str, pair: str)
        'volume': 'sum'
    }
    timeframe_minutes = timeframe_to_minutes(timeframe)
+    resample_interval = f'{timeframe_minutes}min'
+    if timeframe_minutes >= 43200 and timeframe_minutes < 525600:
+        # Monthly candles need special treatment to stick to the 1st of the month
+        resample_interval = f'{timeframe}S'
+    elif timeframe_minutes > 43200:
+        resample_interval = timeframe
    # Resample to create "NAN" values
-    df = dataframe.resample(f'{timeframe_minutes}min', on='date').agg(ohlcv_dict)
+    df = dataframe.resample(resample_interval, on='date').agg(ohlcv_dict)

    # Forwardfill close for missing columns
    df['close'] = df['close'].fillna(method='ffill')
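The special-casing exists because pandas' minute-based frequencies drift for month-sized candles: a fixed `'43200min'` bin does not land on the 1st of each month, while the month-start style anchor does. A small illustration (timeframe `'1M'` becomes `'1MS'` via the `f'{timeframe}S'` expression above):

```python
import pandas as pd

df = pd.DataFrame({'date': pd.to_datetime(['2023-01-01', '2023-03-01']),
                   'close': [1.0, 2.0]})
out = df.resample('1MS', on='date').agg({'close': 'last'})
print(out.index)
# DatetimeIndex(['2023-01-01', '2023-02-01', '2023-03-01'], ..., freq='MS')
```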
@@ -122,7 +128,7 @@ def ohlcv_fill_up_missing_data(dataframe: DataFrame, timeframe: str, pair: str)
    return df


-def trim_dataframe(df: DataFrame, timerange, df_date_col: str = 'date',
+def trim_dataframe(df: DataFrame, timerange, *, df_date_col: str = 'date',
                   startup_candles: int = 0) -> DataFrame:
    """
    Trim dataframe based on given timerange
@@ -264,7 +270,6 @@ def convert_ohlcv_format(
    convert_from: str,
    convert_to: str,
    erase: bool,
-    candle_type: CandleType
):
    """
    Convert OHLCV from one format to another

@@ -272,7 +277,6 @@ def convert_ohlcv_format(
    :param convert_from: Source format
    :param convert_to: Target format
    :param erase: Erase source data (does not apply if source and target format are identical)
-    :param candle_type: Any of the enum CandleType (must match trading mode!)
    """
    from freqtrade.data.history.idatahandler import get_datahandler
    src = get_datahandler(config['datadir'], convert_from)
@@ -280,37 +284,45 @@ def convert_ohlcv_format(
-    timeframes = config.get('timeframes', [config.get('timeframe')])
-    logger.info(f"Converting candle (OHLCV) for timeframe {timeframes}")
-
-    if 'pairs' not in config:
-        config['pairs'] = []
-        # Check timeframes or fall back to timeframe.
-        for timeframe in timeframes:
-            config['pairs'].extend(src.ohlcv_get_pairs(
-                config['datadir'],
-                timeframe,
-                candle_type=candle_type
-            ))
-        config['pairs'] = sorted(set(config['pairs']))
-    logger.info(f"Converting candle (OHLCV) data for {config['pairs']}")
+    candle_types = [CandleType.from_string(ct) for ct in config.get('candle_types', [
+        c.value for c in CandleType])]
+    logger.info(candle_types)
+    paircombs = src.ohlcv_get_available_data(config['datadir'], TradingMode.SPOT)
+    paircombs.extend(src.ohlcv_get_available_data(config['datadir'], TradingMode.FUTURES))

-    for timeframe in timeframes:
-        for pair in config['pairs']:
-            data = src.ohlcv_load(pair=pair, timeframe=timeframe,
-                                  timerange=None,
-                                  fill_missing=False,
-                                  drop_incomplete=False,
-                                  startup_candles=0,
-                                  candle_type=candle_type)
-            logger.info(f"Converting {len(data)} {timeframe} {candle_type} candles for {pair}")
-            if len(data) > 0:
-                trg.ohlcv_store(
-                    pair=pair,
-                    timeframe=timeframe,
-                    data=data,
-                    candle_type=candle_type
-                )
-                if erase and convert_from != convert_to:
-                    logger.info(f"Deleting source data for {pair} / {timeframe}")
-                    src.ohlcv_purge(pair=pair, timeframe=timeframe, candle_type=candle_type)
+    if 'pairs' in config:
+        # Filter pairs
+        paircombs = [comb for comb in paircombs if comb[0] in config['pairs']]
+
+    if 'timeframes' in config:
+        paircombs = [comb for comb in paircombs if comb[1] in config['timeframes']]
+    paircombs = [comb for comb in paircombs if comb[2] in candle_types]
+
+    paircombs = sorted(paircombs, key=lambda x: (x[0], x[1], x[2].value))
+
+    formatted_paircombs = '\n'.join([f"{pair}, {timeframe}, {candle_type}"
+                                     for pair, timeframe, candle_type in paircombs])
+
+    logger.info(f"Converting candle (OHLCV) data for the following pair combinations:\n"
+                f"{formatted_paircombs}")
+    for pair, timeframe, candle_type in paircombs:
+        data = src.ohlcv_load(pair=pair, timeframe=timeframe,
+                              timerange=None,
+                              fill_missing=False,
+                              drop_incomplete=False,
+                              startup_candles=0,
+                              candle_type=candle_type)
+        logger.info(f"Converting {len(data)} {timeframe} {candle_type} candles for {pair}")
+        if len(data) > 0:
+            trg.ohlcv_store(
+                pair=pair,
+                timeframe=timeframe,
+                data=data,
+                candle_type=candle_type
+            )
+            if erase and convert_from != convert_to:
+                logger.info(f"Deleting source data for {pair} / {timeframe}")
+                src.ohlcv_purge(pair=pair, timeframe=timeframe, candle_type=candle_type)


def reduce_dataframe_footprint(df: DataFrame) -> DataFrame:
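The rewritten converter no longer needs a `candle_type` argument: it discovers every available (pair, timeframe, candle type) combination for both spot and futures, then narrows by whatever the config supplies. The same filtering on made-up tuples:

```python
# Illustrative input resembling ohlcv_get_available_data() output.
from freqtrade.enums import CandleType

paircombs = [
    ('BTC/USDT', '5m', CandleType.SPOT),
    ('ETH/USDT', '1h', CandleType.SPOT),
    ('BTC/USDT:USDT', '1h', CandleType.FUTURES),
]
config = {'pairs': ['BTC/USDT'], 'timeframes': ['5m']}
paircombs = [c for c in paircombs if c[0] in config['pairs']]
paircombs = [c for c in paircombs if c[1] in config['timeframes']]
print(paircombs)  # [('BTC/USDT', '5m', <CandleType.SPOT: 'spot'>)]
```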
@@ -310,7 +310,7 @@ class DataProvider:
                timeframe=timeframe or self._config['timeframe'],
                datadir=self._config['datadir'],
                timerange=timerange,
-                data_format=self._config.get('dataformat_ohlcv', 'json'),
+                data_format=self._config['dataformat_ohlcv'],
                candle_type=_candle_type,

            )
@@ -7,15 +7,16 @@ from typing import Dict, List, Optional, Tuple
from pandas import DataFrame, concat

from freqtrade.configuration import TimeRange
-from freqtrade.constants import DATETIME_PRINT_FORMAT, DEFAULT_DATAFRAME_COLUMNS, Config
+from freqtrade.constants import (DATETIME_PRINT_FORMAT, DEFAULT_DATAFRAME_COLUMNS,
+                                 DL_DATA_TIMEFRAMES, Config)
from freqtrade.data.converter import (clean_ohlcv_dataframe, ohlcv_to_dataframe,
                                      trades_remove_duplicates, trades_to_ohlcv)
from freqtrade.data.history.idatahandler import IDataHandler, get_datahandler
from freqtrade.enums import CandleType
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import Exchange
-from freqtrade.misc import format_ms_time
from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist
+from freqtrade.util import format_ms_time
from freqtrade.util.binance_mig import migrate_binance_futures_data

@@ -68,7 +69,7 @@ def load_data(datadir: Path,
              fill_up_missing: bool = True,
              startup_candles: int = 0,
              fail_without_data: bool = False,
-              data_format: str = 'json',
+              data_format: str = 'feather',
              candle_type: CandleType = CandleType.SPOT,
              user_futures_funding_rate: Optional[int] = None,
              ) -> Dict[str, DataFrame]:

@@ -354,7 +355,7 @@ def _download_trades_history(exchange: Exchange,
    trades = []

    if not since:
-        since = int((datetime.now() - timedelta(days=-new_pairs_days)).timestamp()) * 1000
+        since = int((datetime.now() - timedelta(days=new_pairs_days)).timestamp()) * 1000

    from_id = trades[-1][1] if trades else None
    if trades and since < trades[-1][0]:
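The `timedelta` fix above is a one-character sign bug: subtracting a negative delta adds days, so the fallback `since` pointed 30 days into the future instead of 30 days back. Demonstrably:

```python
from datetime import datetime, timedelta

now = datetime(2023, 7, 30)
print(now - timedelta(days=-30))  # 2023-08-29 00:00:00 (future, the bug)
print(now - timedelta(days=30))   # 2023-06-30 00:00:00 (30 days back, the fix)
```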
@@ -393,7 +394,7 @@ def _download_trades_history(exchange: Exchange,

def refresh_backtest_trades_data(exchange: Exchange, pairs: List[str], datadir: Path,
                                 timerange: TimeRange, new_pairs_days: int = 30,
-                                 erase: bool = False, data_format: str = 'jsongz') -> List[str]:
+                                 erase: bool = False, data_format: str = 'feather') -> List[str]:
    """
    Refresh stored trades data for backtesting and hyperopt operations.
    Used by freqtrade download-data subcommand.

@@ -426,8 +427,8 @@ def convert_trades_to_ohlcv(
        datadir: Path,
        timerange: TimeRange,
        erase: bool = False,
-        data_format_ohlcv: str = 'json',
-        data_format_trades: str = 'jsongz',
+        data_format_ohlcv: str = 'feather',
+        data_format_trades: str = 'feather',
        candle_type: CandleType = CandleType.SPOT
) -> None:
    """

@@ -512,6 +513,8 @@ def download_data_main(config: Config) -> None:
    ]

    expanded_pairs = dynamic_expand_pairlist(config, available_pairs)
+    if 'timeframes' not in config:
+        config['timeframes'] = DL_DATA_TIMEFRAMES

    # Manual validations of relevant settings
    if not config['exchange'].get('skip_pair_validation', False):
@@ -427,6 +427,6 @@ def get_datahandler(datadir: Path, data_format: Optional[str] = None,
    """

    if not data_handler:
-        HandlerClass = get_datahandlerclass(data_format or 'json')
+        HandlerClass = get_datahandlerclass(data_format or 'feather')
        data_handler = HandlerClass(datadir)
    return data_handler
@@ -194,32 +194,35 @@ def calculate_cagr(days_passed: int, starting_balance: float, final_balance: flo
    return (final_balance / starting_balance) ** (1 / (days_passed / 365)) - 1


-def calculate_expectancy(trades: pd.DataFrame) -> float:
+def calculate_expectancy(trades: pd.DataFrame) -> Tuple[float, float]:
    """
    Calculate expectancy
    :param trades: DataFrame containing trades (requires columns close_date and profit_abs)
-    :return: expectancy
+    :return: expectancy, expectancy_ratio
    """
-    if len(trades) == 0:
-        return 0
-
-    expectancy = 1
+    expectancy = 0
+    expectancy_ratio = 100

-    profit_sum = trades.loc[trades['profit_abs'] > 0, 'profit_abs'].sum()
-    loss_sum = abs(trades.loc[trades['profit_abs'] < 0, 'profit_abs'].sum())
-    nb_win_trades = len(trades.loc[trades['profit_abs'] > 0])
-    nb_loss_trades = len(trades.loc[trades['profit_abs'] < 0])
+    if len(trades) > 0:
+        winning_trades = trades.loc[trades['profit_abs'] > 0]
+        losing_trades = trades.loc[trades['profit_abs'] < 0]
+        profit_sum = winning_trades['profit_abs'].sum()
+        loss_sum = abs(losing_trades['profit_abs'].sum())
+        nb_win_trades = len(winning_trades)
+        nb_loss_trades = len(losing_trades)

-    if (nb_win_trades > 0) and (nb_loss_trades > 0):
-        average_win = profit_sum / nb_win_trades
-        average_loss = loss_sum / nb_loss_trades
-        risk_reward_ratio = average_win / average_loss
-        winrate = nb_win_trades / len(trades)
-        expectancy = ((1 + risk_reward_ratio) * winrate) - 1
-    elif nb_win_trades == 0:
-        expectancy = 0
+        average_win = (profit_sum / nb_win_trades) if nb_win_trades > 0 else 0
+        average_loss = (loss_sum / nb_loss_trades) if nb_loss_trades > 0 else 0
+        winrate = (nb_win_trades / len(trades))
+        loserate = (nb_loss_trades / len(trades))

-    return expectancy
+        expectancy = (winrate * average_win) - (loserate * average_loss)
+        if (average_loss > 0):
+            risk_reward_ratio = average_win / average_loss
+            expectancy_ratio = ((1 + risk_reward_ratio) * winrate) - 1
+
+    return expectancy, expectancy_ratio


def calculate_sortino(trades: pd.DataFrame, min_date: datetime, max_date: datetime,
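A worked example of the two return values, using invented trades of +10, +10 and -5 in `profit_abs`:

```python
import pandas as pd
from freqtrade.data.metrics import calculate_expectancy

trades = pd.DataFrame({'profit_abs': [10.0, 10.0, -5.0]})
expectancy, expectancy_ratio = calculate_expectancy(trades)
# winrate = 2/3, average_win = 10, loserate = 1/3, average_loss = 5
# expectancy       = (2/3 * 10) - (1/3 * 5) = 5.0   (avg profit per trade)
# expectancy_ratio = (1 + 10/5) * 2/3 - 1   = 1.0   (the old risk-reward form)
assert round(expectancy, 9) == 5.0
assert round(expectancy_ratio, 9) == 1.0
```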
@@ -115,7 +115,7 @@ class Edge:
            exchange=self.exchange,
            timeframe=self.strategy.timeframe,
            timerange=timerange_startup,
-            data_format=self.config.get('dataformat_ohlcv', 'json'),
+            data_format=self.config['dataformat_ohlcv'],
            candle_type=self.config.get('candle_type_def', CandleType.SPOT),
        )
        # Download informative pairs too

@@ -132,7 +132,7 @@ class Edge:
            exchange=self.exchange,
            timeframe=timeframe,
            timerange=timerange_startup,
-            data_format=self.config.get('dataformat_ohlcv', 'json'),
+            data_format=self.config['dataformat_ohlcv'],
            candle_type=self.config.get('candle_type_def', CandleType.SPOT),
        )

@@ -142,7 +142,7 @@ class Edge:
            timeframe=self.strategy.timeframe,
            timerange=self._timerange,
            startup_candles=self.strategy.startup_candle_count,
-            data_format=self.config.get('dataformat_ohlcv', 'json'),
+            data_format=self.config['dataformat_ohlcv'],
            candle_type=self.config.get('candle_type_def', CandleType.SPOT),
        )

@@ -172,13 +172,7 @@ class Edge:
            pair_data = pair_data.sort_values(by=['date'])
            pair_data = pair_data.reset_index(drop=True)

-            df_analyzed = self.strategy.advise_exit(
-                dataframe=self.strategy.advise_entry(
-                    dataframe=pair_data,
-                    metadata={'pair': pair}
-                ),
-                metadata={'pair': pair}
-            )[headers].copy()
+            df_analyzed = self.strategy.ft_advise_signals(pair_data, {'pair': pair})[headers].copy()

            trades += self._find_trades_for_stoploss_range(df_analyzed, pair, self._stoploss_range)
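Edge (and backtesting, in a later hunk) now call a single `ft_advise_signals()` helper instead of nesting `advise_entry` inside `advise_exit`. Judging purely from the calls it replaces here, the helper is equivalent to a chain like the following sketch (not a verbatim copy of the strategy interface):

```python
def ft_advise_signals(self, dataframe, metadata):
    # Populate entry signals, then exit signals, in one call.
    dataframe = self.advise_entry(dataframe, metadata)
    dataframe = self.advise_exit(dataframe, metadata)
    return dataframe
```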
@@ -34,6 +34,7 @@ class Binance(Exchange):
        "tickers_have_price": False,
        "floor_leverage": True,
        "stop_price_type_field": "workingType",
+        "order_props_in_contracts": ['amount', 'cost', 'filled', 'remaining'],
        "stop_price_type_value_mapping": {
            PriceType.LAST: "CONTRACT_PRICE",
            PriceType.MARK: "MARK_PRICE",

(File diff suppressed because it is too large.)
@@ -28,7 +28,7 @@ class Bybit(Exchange):

    _ft_has: Dict = {
        "ohlcv_candle_limit": 200,
-        "ohlcv_has_history": False,
+        "ohlcv_has_history": True,
    }
    _ft_has_futures: Dict = {
        "ohlcv_has_history": True,
@@ -80,9 +80,8 @@ class Exchange:
        "mark_ohlcv_price": "mark",
        "mark_ohlcv_timeframe": "8h",
        "ccxt_futures_name": "swap",
-        "fee_cost_in_contracts": False,  # Fee cost needs contract conversion
        "needs_trading_fees": False,  # use fetch_trading_fees to cache fees
-        "order_props_in_contracts": ['amount', 'cost', 'filled', 'remaining'],
+        "order_props_in_contracts": ['amount', 'filled', 'remaining'],
        # Override createMarketBuyOrderRequiresPrice where ccxt has it wrong
        "marketOrderRequiresPrice": False,
    }

@@ -1859,9 +1858,6 @@ class Exchange:
        if fee_curr is None:
            return None
        fee_cost = float(fee['cost'])
-        if self._ft_has['fee_cost_in_contracts']:
-            # Convert cost via "contracts" conversion
-            fee_cost = self._contracts_to_amount(symbol, fee['cost'])

        # Calculate fee based on order details
        if fee_curr == self.get_pair_base_currency(symbol):
@@ -33,8 +33,6 @@ class Gate(Exchange):
    _ft_has_futures: Dict = {
        "needs_trading_fees": True,
-        "marketOrderRequiresPrice": False,
-        "fee_cost_in_contracts": False,  # Set explicitly to false for clarity
        "order_props_in_contracts": ['amount', 'filled', 'remaining'],
        "stop_price_type_field": "price_type",
        "stop_price_type_value_mapping": {
            PriceType.LAST: 0,

@@ -32,7 +32,6 @@ class Okx(Exchange):
    }
    _ft_has_futures: Dict = {
        "tickers_have_quoteVolume": False,
-        "fee_cost_in_contracts": True,
        "stop_price_type_field": "slTriggerPxType",
        "stop_price_type_value_mapping": {
            PriceType.LAST: "last",
@@ -120,7 +120,7 @@ class BaseClassifierModel(IFreqaiModel):
            if dk.feature_pipeline["di"]:
                dk.DI_values = dk.feature_pipeline["di"].di_values
            else:
-                dk.DI_values = np.zeros(len(outliers.index))
-            dk.do_predict = outliers.to_numpy()
+                dk.DI_values = np.zeros(outliers.shape[0])
+            dk.do_predict = outliers

        return (pred_df, dk.do_predict)

@@ -94,8 +94,8 @@ class BasePyTorchClassifier(BasePyTorchModel):
            if dk.feature_pipeline["di"]:
                dk.DI_values = dk.feature_pipeline["di"].di_values
            else:
-                dk.DI_values = np.zeros(len(outliers.index))
-            dk.do_predict = outliers.to_numpy()
+                dk.DI_values = np.zeros(outliers.shape[0])
+            dk.do_predict = outliers

        return (pred_df, dk.do_predict)
@@ -55,8 +55,8 @@ class BasePyTorchRegressor(BasePyTorchModel):
            if dk.feature_pipeline["di"]:
                dk.DI_values = dk.feature_pipeline["di"].di_values
            else:
-                dk.DI_values = np.zeros(len(outliers.index))
-            dk.do_predict = outliers.to_numpy()
+                dk.DI_values = np.zeros(outliers.shape[0])
+            dk.do_predict = outliers
        return (pred_df, dk.do_predict)

    def train(

@@ -114,7 +114,7 @@ class BaseRegressionModel(IFreqaiModel):
            if dk.feature_pipeline["di"]:
                dk.DI_values = dk.feature_pipeline["di"].di_values
            else:
-                dk.DI_values = np.zeros(len(outliers.index))
-            dk.do_predict = outliers.to_numpy()
+                dk.DI_values = np.zeros(outliers.shape[0])
+            dk.do_predict = outliers

        return (pred_df, dk.do_predict)
@@ -635,7 +635,7 @@ class FreqaiDataDrawer:
                timeframe=tf,
                pair=pair,
                timerange=timerange,
-                data_format=self.config.get("dataformat_ohlcv", "json"),
+                data_format=self.config.get("dataformat_ohlcv", "feather"),
                candle_type=self.config.get("candle_type_def", CandleType.SPOT),
            )
@@ -86,8 +86,6 @@ class IFreqaiModel(ABC):
            logger.warning("DI threshold is not configured for Keras models yet. Deactivating.")

-        self.CONV_WIDTH = self.freqai_info.get('conv_width', 1)
-        if self.ft_params.get("inlier_metric_window", 0):
-            self.CONV_WIDTH = self.ft_params.get("inlier_metric_window", 0) * 2
        self.class_names: List[str] = []  # used in classification subclasses
        self.pair_it = 0
        self.pair_it_train = 0

@@ -515,7 +513,7 @@ class IFreqaiModel(ABC):
        ]

        if ft_params.get("principal_component_analysis", False):
-            pipe_steps.append(('pca', ds.PCA()))
+            pipe_steps.append(('pca', ds.PCA(n_components=0.999)))
            pipe_steps.append(('post-pca-scaler',
                              SKLearnWrapper(MinMaxScaler(feature_range=(-1, 1)))))
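`n_components=0.999` follows scikit-learn semantics (datasieve's `PCA` wraps scikit-learn's): a float in (0, 1) keeps only as many components as needed to explain that fraction of the variance, instead of keeping all of them. Illustrated directly with scikit-learn on synthetic low-rank data:

```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(42)
signal = rng.normal(size=(500, 3)) @ rng.normal(size=(3, 20))  # rank-3 structure
X = signal + rng.normal(scale=1e-3, size=(500, 20))            # tiny noise
pca = PCA(n_components=0.999).fit(X)
print(pca.n_components_)  # ~3 components suffice for 99.9% of the variance
```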
@@ -676,15 +674,6 @@ class IFreqaiModel(ABC):
        hist_preds_df['close_price'] = strat_df['close']
        hist_preds_df['date_pred'] = strat_df['date']

-        # # for keras type models, the conv_window needs to be prepended so
-        # # viewing is correct in frequi
-        if self.ft_params.get('inlier_metric_window', 0):
-            n_lost_points = self.freqai_info.get('conv_width', 2)
-            zeros_df = DataFrame(np.zeros((n_lost_points, len(hist_preds_df.columns))),
-                                 columns=hist_preds_df.columns)
-            self.dd.historic_predictions[pair] = pd.concat(
-                [zeros_df, hist_preds_df], axis=0, ignore_index=True)
-
    def fit_live_predictions(self, dk: FreqaiDataKitchen, pair: str) -> None:
        """
        Fit the labels with a gaussian distribution

@@ -1012,6 +1001,6 @@ class IFreqaiModel(ABC):
        if self.freqai_info.get("DI_threshold", 0) > 0:
            dk.DI_values = dk.feature_pipeline["di"].di_values
        else:
-            dk.DI_values = np.zeros(len(outliers.index))
-        dk.do_predict = outliers.to_numpy()
+            dk.DI_values = np.zeros(outliers.shape[0])
+        dk.do_predict = outliers
        return
@@ -32,8 +32,8 @@ class LightGBMClassifier(BaseClassifierModel):
            eval_set = None
            test_weights = None
        else:
-            eval_set = (data_dictionary["test_features"].to_numpy(),
-                        data_dictionary["test_labels"].to_numpy()[:, 0])
+            eval_set = [(data_dictionary["test_features"].to_numpy(),
+                         data_dictionary["test_labels"].to_numpy()[:, 0])]
            test_weights = data_dictionary["test_weights"]
        X = data_dictionary["train_features"].to_numpy()
        y = data_dictionary["train_labels"].to_numpy()[:, 0]

@@ -42,7 +42,6 @@ class LightGBMClassifier(BaseClassifierModel):
        init_model = self.get_init_model(dk.pair)

        model = LGBMClassifier(**self.model_training_parameters)
-
        model.fit(X=X, y=y, eval_set=eval_set, sample_weight=train_weights,
                  eval_sample_weight=[test_weights], init_model=init_model)

@@ -32,7 +32,7 @@ class LightGBMRegressor(BaseRegressionModel):
            eval_set = None
            eval_weights = None
        else:
-            eval_set = (data_dictionary["test_features"], data_dictionary["test_labels"])
+            eval_set = [(data_dictionary["test_features"], data_dictionary["test_labels"])]
            eval_weights = data_dictionary["test_weights"]
        X = data_dictionary["train_features"]
        y = data_dictionary["train_labels"]

@@ -42,10 +42,10 @@ class LightGBMRegressorMultiTarget(BaseRegressionModel):
        eval_weights = [data_dictionary["test_weights"]]
        eval_sets = [(None, None)] * data_dictionary['test_labels'].shape[1]  # type: ignore
        for i in range(data_dictionary['test_labels'].shape[1]):
-            eval_sets[i] = (  # type: ignore
-                data_dictionary["test_features"],
-                data_dictionary["test_labels"].iloc[:, i]
-            )
+            eval_sets[i] = [(  # type: ignore
+                data_dictionary["test_features"],
+                data_dictionary["test_labels"].iloc[:, i]
+            )]

        init_model = self.get_init_model(dk.pair)
        if init_model:
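All three LightGBM wrappers above change `eval_set` from a bare `(X, y)` tuple to a list of tuples, which is the form the scikit-learn-style LightGBM API documents (and which newer LightGBM releases enforce more strictly). Minimal usage with synthetic data:

```python
import numpy as np
from lightgbm import LGBMRegressor

rng = np.random.default_rng(0)
X, y = rng.normal(size=(200, 5)), rng.normal(size=200)
X_val, y_val = rng.normal(size=(50, 5)), rng.normal(size=50)

model = LGBMRegressor(n_estimators=10)
model.fit(X, y, eval_set=[(X_val, y_val)])  # a list of (X, y) pairs, not a bare tuple
```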
@@ -136,8 +136,8 @@ class PyTorchTransformerRegressor(BasePyTorchRegressor):
        if self.freqai_info.get("DI_threshold", 0) > 0:
            dk.DI_values = dk.feature_pipeline["di"].di_values
        else:
-            dk.DI_values = np.zeros(len(outliers.index))
-        dk.do_predict = outliers.to_numpy()
+            dk.DI_values = np.zeros(outliers.shape[0])
+        dk.do_predict = outliers

        if x.shape[1] > 1:
            zeros_df = pd.DataFrame(np.zeros((x.shape[1] - len(pred_df), len(pred_df.columns))),
@@ -50,7 +50,7 @@ def download_all_data_for_training(dp: DataProvider, config: Config) -> None:
        timerange=timerange,
        new_pairs_days=new_pairs_days,
        erase=False,
-        data_format=config.get("dataformat_ohlcv", "json"),
+        data_format=config.get("dataformat_ohlcv", "feather"),
        trading_mode=config.get("trading_mode", "spot"),
        prepend=config.get("prepend_data", False),
    )
@@ -231,7 +231,7 @@ class FreqtradeBot(LoggingMixin):
        self.manage_open_orders()

        # Protect from collisions with force_exit.
-        # Without this, freqtrade my try to recreate stoploss_on_exchange orders
+        # Without this, freqtrade may try to recreate stoploss_on_exchange orders
        # while exiting is in process, since telegram messages arrive in an different thread.
        with self._exit_lock:
            trades = Trade.get_open_trades()

@@ -1383,7 +1383,10 @@ class FreqtradeBot(LoggingMixin):
        latest_candle_close_date = timeframe_to_next_date(self.strategy.timeframe,
                                                          latest_candle_open_date)
        # Check if new candle
-        if order_obj and latest_candle_close_date > order_obj.order_date_utc:
+        if (
+            order_obj and order_obj.side == trade.entry_side
+            and latest_candle_close_date > order_obj.order_date_utc
+        ):
+            # New candle
            proposed_rate = self.exchange.get_rate(
                trade.pair, side='entry', is_short=trade.is_short, refresh=True)

@@ -1941,6 +1944,7 @@ class FreqtradeBot(LoggingMixin):
        """
        Applies the fee to amount (either from Order or from Trades).
        Can eat into dust if more than the required asset is available.
+        In case of trade adjustment orders, trade.amount will not have been adjusted yet.
        Can't happen in Futures mode - where Fees are always in settlement currency,
        never in base currency.
        """

@@ -1950,6 +1954,10 @@ class FreqtradeBot(LoggingMixin):
        # check against remaining amount!
        amount_ = trade.amount - amount

+        if trade.nr_of_successful_entries >= 1 and order_obj.ft_order_side == trade.entry_side:
+            # In case of rebuy's, trade.amount doesn't contain the amount of the last entry.
+            amount_ = trade.amount + amount
+
        if fee_abs != 0 and self.wallets.get_free(trade_base_currency) >= amount_:
            # Eat into dust if we own more than base currency
            logger.info(f"Fee amount for {trade} was in base currency - "

@@ -1979,7 +1987,11 @@ class FreqtradeBot(LoggingMixin):
        # Init variables
        order_amount = safe_value_fallback(order, 'filled', 'amount')
        # Only run for closed orders
-        if trade.fee_updated(order.get('side', '')) or order['status'] == 'open':
+        if (
+            trade.fee_updated(order.get('side', ''))
+            or order['status'] == 'open'
+            or order_obj.ft_fee_base
+        ):
            return None

        trade_base_currency = self.exchange.get_pair_base_currency(trade.pair)
@@ -3,7 +3,6 @@ Various tool function for Freqtrade and scripts
"""
-import gzip
import logging
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterator, List, Mapping, Optional, TextIO, Union
from urllib.parse import urlparse

@@ -117,20 +116,19 @@ def file_load_json(file: Path):
    return pairdata


+def is_file_in_dir(file: Path, directory: Path) -> bool:
+    """
+    Helper function to check if file is in directory.
+    """
+    return file.is_file() and file.parent.samefile(directory)
+
+
def pair_to_filename(pair: str) -> str:
    for ch in ['/', ' ', '.', '@', '$', '+', ':']:
        pair = pair.replace(ch, '_')
    return pair


-def format_ms_time(date: int) -> str:
-    """
-    convert MS date to readable format.
-    : epoch-string in ms
-    """
-    return datetime.fromtimestamp(date / 1000.0).strftime('%Y-%m-%dT%H:%M:%S')
-
-
def deep_merge_dicts(source, destination, allow_null_overrides: bool = True):
    """
    Values from Source override destination, destination is returned (and modified!!)
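Usage of the new `is_file_in_dir()` helper with hypothetical paths; `Path.samefile()` compares resolved paths, so a symlinked directory still matches while a path that merely looks similar does not:

```python
from pathlib import Path
from freqtrade.misc import is_file_in_dir

results_dir = Path('user_data/backtest_results')             # assumed layout
candidate = results_dir / 'backtest-result-2023-07-30.json'  # invented name
print(is_file_in_dir(candidate, results_dir))  # True only if the file really lives there
```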
@@ -3,7 +3,7 @@ from typing import Callable

from cachetools import TTLCache, cached


-class LoggingMixin():
+class LoggingMixin:
    """
    Logging Mixin
    Shows similar messages only once every `refresh_period`.
@@ -39,6 +39,7 @@ from freqtrade.plugins.protectionmanager import ProtectionManager
from freqtrade.resolvers import ExchangeResolver, StrategyResolver
from freqtrade.strategy.interface import IStrategy
from freqtrade.strategy.strategy_wrapper import strategy_safe_wrapper
+from freqtrade.types import BacktestResultType, get_BacktestResultType_default
from freqtrade.util.binance_mig import migrate_binance_futures_data
from freqtrade.wallets import Wallets

@@ -77,7 +78,7 @@ class Backtesting:

        LoggingMixin.show_output = False
        self.config = config
-        self.results: Dict[str, Any] = {}
+        self.results: BacktestResultType = get_BacktestResultType_default()
        self.trade_id_counter: int = 0
        self.order_id_counter: int = 0

@@ -239,7 +240,7 @@ class Backtesting:
            timerange=self.timerange,
            startup_candles=self.config['startup_candle_count'],
            fail_without_data=True,
-            data_format=self.config.get('dataformat_ohlcv', 'json'),
+            data_format=self.config['dataformat_ohlcv'],
            candle_type=self.config.get('candle_type_def', CandleType.SPOT)
        )

@@ -268,7 +269,7 @@ class Backtesting:
            timerange=self.timerange,
            startup_candles=0,
            fail_without_data=True,
-            data_format=self.config.get('dataformat_ohlcv', 'json'),
+            data_format=self.config['dataformat_ohlcv'],
            candle_type=self.config.get('candle_type_def', CandleType.SPOT)
        )
    else:
@@ -282,7 +283,7 @@ class Backtesting:
            timerange=self.timerange,
            startup_candles=0,
            fail_without_data=True,
-            data_format=self.config.get('dataformat_ohlcv', 'json'),
+            data_format=self.config['dataformat_ohlcv'],
            candle_type=CandleType.FUNDING_RATE
        )

@@ -294,7 +295,7 @@ class Backtesting:
            timerange=self.timerange,
            startup_candles=0,
            fail_without_data=True,
-            data_format=self.config.get('dataformat_ohlcv', 'json'),
+            data_format=self.config['dataformat_ohlcv'],
            candle_type=CandleType.from_string(self.exchange.get_option("mark_ohlcv_price"))
        )
        # Combine data to avoid combining the data per trade.
@@ -367,11 +368,7 @@ class Backtesting:
        if not pair_data.empty:
            # Cleanup from prior runs
            pair_data.drop(HEADERS[5:] + ['buy', 'sell'], axis=1, errors='ignore')

-        df_analyzed = self.strategy.advise_exit(
-            self.strategy.advise_entry(pair_data, {'pair': pair}),
-            {'pair': pair}
-        ).copy()
+        df_analyzed = self.strategy.ft_advise_signals(pair_data, {'pair': pair})
        # Trim startup period from analyzed dataframe
        df_analyzed = processed[pair] = pair_data = trim_dataframe(
            df_analyzed, self.timerange, startup_candles=self.required_startup)

@@ -679,6 +676,7 @@ class Backtesting:
            remaining=amount,
            cost=amount * close_rate,
        )
+        order._trade_bt = trade
        trade.orders.append(order)
        return trade
@@ -901,8 +899,9 @@ class Backtesting:
            amount=amount,
            filled=0,
            remaining=amount,
-            cost=stake_amount + trade.fee_open,
+            cost=amount * propose_rate + trade.fee_open,
        )
+        order._trade_bt = trade
        trade.orders.append(order)
        if pos_adjust and self._get_order_filled(order.ft_price, row):
            order.close_bt_order(current_time, trade)

@@ -1275,6 +1274,7 @@ class Backtesting:
        preprocessed = self.strategy.advise_all_indicators(data)

        # Trim startup period from analyzed dataframe
+        # This only used to determine if trimming would result in an empty dataframe
        preprocessed_tmp = trim_dataframes(preprocessed, timerange, self.required_startup)

        if not preprocessed_tmp:
@@ -446,6 +446,8 @@ class Hyperopt:
        preprocessed = self.backtesting.strategy.advise_all_indicators(data)

        # Trim startup period from analyzed dataframe to get correct dates for output.
+        # This is only used to keep track of min/max date after trimming.
+        # The result is NOT returned from this method, actual trimming happens in backtesting.
        trimmed = trim_dataframes(preprocessed, self.timerange, self.backtesting.required_startup)
        self.min_date, self.max_date = get_timerange(trimmed)
        if not self.market_change:
@@ -35,7 +35,7 @@ def hyperopt_serializer(x):
        return str(x)


-class HyperoptStateContainer():
+class HyperoptStateContainer:
    """ Singleton class to track state of hyperopt"""
    state: HyperoptState = HyperoptState.OPTIMIZE

@@ -44,7 +44,7 @@ class HyperoptStateContainer():
        cls.state = value


-class HyperoptTools():
+class HyperoptTools:

    @staticmethod
    def get_strategy_filename(config: Config, strategy_name: str) -> Optional[Path]:

@@ -432,12 +432,10 @@ class HyperoptTools():
        for i in range(len(trials)):
            if trials.loc[i]['is_profit']:
                for j in range(len(trials.loc[i]) - 3):
-                    trials.iat[i, j] = "{}{}{}".format(Fore.GREEN,
-                                                       str(trials.loc[i][j]), Fore.RESET)
+                    trials.iat[i, j] = f"{Fore.GREEN}{str(trials.loc[i][j])}{Fore.RESET}"
            if trials.loc[i]['is_best'] and highlight_best:
                for j in range(len(trials.loc[i]) - 3):
-                    trials.iat[i, j] = "{}{}{}".format(Style.BRIGHT,
-                                                       str(trials.loc[i][j]), Style.RESET_ALL)
+                    trials.iat[i, j] = f"{Style.BRIGHT}{str(trials.loc[i][j])}{Style.RESET_ALL}"

        trials = trials.drop(columns=['is_initial_point', 'is_best', 'is_profit', 'is_random'])
        if remove_header > 0:
freqtrade/optimize/optimize_reports/__init__.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# flake8: noqa: F401
from freqtrade.optimize.optimize_reports.bt_output import (generate_edge_table,
                                                           generate_wins_draws_losses,
                                                           show_backtest_result,
                                                           show_backtest_results,
                                                           show_sorted_pairlist,
                                                           text_table_add_metrics,
                                                           text_table_bt_results,
                                                           text_table_exit_reason,
                                                           text_table_periodic_breakdown,
                                                           text_table_strategy, text_table_tags)
from freqtrade.optimize.optimize_reports.bt_storage import (store_backtest_analysis_results,
                                                            store_backtest_stats)
from freqtrade.optimize.optimize_reports.optimize_reports import (
    generate_all_periodic_breakdown_stats, generate_backtest_stats, generate_daily_stats,
    generate_exit_reason_stats, generate_pair_metrics, generate_periodic_breakdown_stats,
    generate_rejected_signals, generate_strategy_comparison, generate_strategy_stats,
    generate_tag_metrics, generate_trade_signal_candles, generate_trading_stats)
freqtrade/optimize/optimize_reports/bt_output.py (new file, 419 lines)
@ -0,0 +1,419 @@
|
|||
import logging
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from tabulate import tabulate
|
||||
|
||||
from freqtrade.constants import UNLIMITED_STAKE_AMOUNT, Config
|
||||
from freqtrade.misc import decimals_per_coin, round_coin_value
|
||||
from freqtrade.optimize.optimize_reports.optimize_reports import generate_periodic_breakdown_stats
|
||||
from freqtrade.types import BacktestResultType
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _get_line_floatfmt(stake_currency: str) -> List[str]:
|
||||
"""
|
||||
Generate floatformat (goes in line with _generate_result_line())
|
||||
"""
|
||||
return ['s', 'd', '.2f', '.2f', f'.{decimals_per_coin(stake_currency)}f',
|
||||
'.2f', 'd', 's', 's']
|
||||
|
||||
|
||||
def _get_line_header(first_column: str, stake_currency: str,
|
||||
direction: str = 'Entries') -> List[str]:
|
||||
"""
|
||||
Generate header lines (goes in line with _generate_result_line())
|
||||
"""
|
||||
return [first_column, direction, 'Avg Profit %', 'Cum Profit %',
|
||||
f'Tot Profit {stake_currency}', 'Tot Profit %', 'Avg Duration',
|
||||
'Win Draw Loss Win%']
|
||||
|
||||
|
||||
def generate_wins_draws_losses(wins, draws, losses):
|
||||
if wins > 0 and losses == 0:
|
||||
wl_ratio = '100'
|
||||
elif wins == 0:
|
||||
wl_ratio = '0'
|
||||
else:
|
||||
wl_ratio = f'{100.0 / (wins + draws + losses) * wins:.1f}' if losses > 0 else '100'
|
||||
return f'{wins:>4} {draws:>4} {losses:>4} {wl_ratio:>4}'
|
||||
|
||||
|
||||
def text_table_bt_results(pair_results: List[Dict[str, Any]], stake_currency: str) -> str:
|
||||
"""
|
||||
Generates and returns a text table for the given backtest data and the results dataframe
|
||||
:param pair_results: List of Dictionaries - one entry per pair + final TOTAL row
|
||||
:param stake_currency: stake-currency - used to correctly name headers
|
||||
:return: pretty printed table with tabulate as string
|
||||
"""
|
||||
|
||||
headers = _get_line_header('Pair', stake_currency)
|
||||
floatfmt = _get_line_floatfmt(stake_currency)
|
||||
output = [[
|
||||
t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
|
||||
t['profit_total_pct'], t['duration_avg'],
|
||||
generate_wins_draws_losses(t['wins'], t['draws'], t['losses'])
|
||||
] for t in pair_results]
|
||||
# Ignore type as floatfmt does allow tuples but mypy does not know that
|
||||
return tabulate(output, headers=headers,
|
||||
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
|
||||
|
||||
|
||||
def text_table_exit_reason(exit_reason_stats: List[Dict[str, Any]], stake_currency: str) -> str:
|
||||
"""
|
||||
Generate small table outlining Backtest results
|
||||
:param sell_reason_stats: Exit reason metrics
|
||||
:param stake_currency: Stakecurrency used
|
||||
:return: pretty printed table with tabulate as string
|
||||
"""
|
||||
headers = [
|
||||
'Exit Reason',
|
||||
'Exits',
|
||||
'Win Draws Loss Win%',
|
||||
'Avg Profit %',
|
||||
'Cum Profit %',
|
||||
f'Tot Profit {stake_currency}',
|
||||
'Tot Profit %',
|
||||
]
|
||||
|
||||
output = [[
|
||||
t.get('exit_reason', t.get('sell_reason')), t['trades'],
|
||||
generate_wins_draws_losses(t['wins'], t['draws'], t['losses']),
|
||||
t['profit_mean_pct'], t['profit_sum_pct'],
|
||||
round_coin_value(t['profit_total_abs'], stake_currency, False),
|
||||
t['profit_total_pct'],
|
||||
] for t in exit_reason_stats]
|
||||
return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")
|
||||
|
||||
|
||||
def text_table_tags(tag_type: str, tag_results: List[Dict[str, Any]], stake_currency: str) -> str:
|
||||
"""
|
||||
Generates and returns a text table for the given backtest data and the results dataframe
|
||||
:param pair_results: List of Dictionaries - one entry per pair + final TOTAL row
|
||||
:param stake_currency: stake-currency - used to correctly name headers
|
||||
:return: pretty printed table with tabulate as string
|
||||
"""
|
||||
if (tag_type == "enter_tag"):
|
||||
headers = _get_line_header("TAG", stake_currency)
|
||||
else:
|
||||
headers = _get_line_header("TAG", stake_currency, 'Exits')
|
||||
floatfmt = _get_line_floatfmt(stake_currency)
|
||||
output = [
|
||||
[
|
||||
t['key'] if t['key'] is not None and len(
|
||||
t['key']) > 0 else "OTHER",
|
||||
t['trades'],
|
||||
t['profit_mean_pct'],
|
||||
t['profit_sum_pct'],
|
||||
t['profit_total_abs'],
|
||||
t['profit_total_pct'],
|
||||
t['duration_avg'],
|
||||
generate_wins_draws_losses(
|
||||
t['wins'],
|
||||
t['draws'],
|
||||
t['losses'])] for t in tag_results]
|
||||
# Ignore type as floatfmt does allow tuples but mypy does not know that
|
||||
return tabulate(output, headers=headers,
|
||||
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
|
||||
|
||||
|
||||
def text_table_periodic_breakdown(days_breakdown_stats: List[Dict[str, Any]],
|
||||
stake_currency: str, period: str) -> str:
|
||||
"""
|
||||
Generate small table with Backtest results by days
|
||||
:param days_breakdown_stats: Days breakdown metrics
|
||||
:param stake_currency: Stakecurrency used
|
||||
:return: pretty printed table with tabulate as string
|
||||
"""
|
||||
headers = [
|
||||
period.capitalize(),
|
||||
f'Tot Profit {stake_currency}',
|
||||
'Wins',
|
||||
'Draws',
|
||||
'Losses',
|
||||
]
|
||||
output = [[
|
||||
d['date'], round_coin_value(d['profit_abs'], stake_currency, False),
|
||||
d['wins'], d['draws'], d['loses'],
|
||||
] for d in days_breakdown_stats]
|
||||
return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")
|
||||
|
||||
|
||||
def text_table_strategy(strategy_results, stake_currency: str) -> str:
|
||||
"""
|
||||
Generate summary table per strategy
|
||||
:param strategy_results: Dict of <Strategyname: DataFrame> containing results for all strategies
|
||||
:param stake_currency: stake-currency - used to correctly name headers
|
||||
:return: pretty printed table with tabulate as string
|
||||
"""
|
||||
floatfmt = _get_line_floatfmt(stake_currency)
|
||||
headers = _get_line_header('Strategy', stake_currency)
|
||||
# _get_line_header() is also used for per-pair summary. Per-pair drawdown is mostly useless
|
||||
# therefore we slip this column in only for strategy summary here.
|
||||
headers.append('Drawdown')
|
||||
|
||||
# Align drawdown string on the center two space separator.
|
||||
if 'max_drawdown_account' in strategy_results[0]:
|
||||
drawdown = [f'{t["max_drawdown_account"] * 100:.2f}' for t in strategy_results]
|
||||
else:
|
||||
# Support for prior backtest results
|
||||
drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results]
|
||||
|
||||
dd_pad_abs = max([len(t['max_drawdown_abs']) for t in strategy_results])
|
||||
dd_pad_per = max([len(dd) for dd in drawdown])
|
||||
drawdown = [f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%'
|
||||
for t, dd in zip(strategy_results, drawdown)]
|
||||
|
||||
output = [[
|
||||
t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
|
||||
t['profit_total_pct'], t['duration_avg'],
|
||||
generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), drawdown]
|
||||
for t, drawdown in zip(strategy_results, drawdown)]
|
||||
# Ignore type as floatfmt does allow tuples but mypy does not know that
|
||||
return tabulate(output, headers=headers,
|
||||
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
|
||||
|
||||
|
||||
def text_table_add_metrics(strat_results: Dict) -> str:
|
||||
if len(strat_results['trades']) > 0:
|
||||
best_trade = max(strat_results['trades'], key=lambda x: x['profit_ratio'])
|
||||
worst_trade = min(strat_results['trades'], key=lambda x: x['profit_ratio'])
|
||||
|
||||
short_metrics = [
|
||||
('', ''), # Empty line to improve readability
|
||||
('Long / Short',
|
||||
f"{strat_results.get('trade_count_long', 'total_trades')} / "
|
||||
f"{strat_results.get('trade_count_short', 0)}"),
|
||||
('Total profit Long %', f"{strat_results['profit_total_long']:.2%}"),
|
||||
('Total profit Short %', f"{strat_results['profit_total_short']:.2%}"),
|
||||
('Absolute profit Long', round_coin_value(strat_results['profit_total_long_abs'],
|
||||
strat_results['stake_currency'])),
|
||||
('Absolute profit Short', round_coin_value(strat_results['profit_total_short_abs'],
|
||||
strat_results['stake_currency'])),
|
||||
] if strat_results.get('trade_count_short', 0) > 0 else []
|
||||
|
||||
drawdown_metrics = []
|
||||
if 'max_relative_drawdown' in strat_results:
|
||||
# Compatibility to show old hyperopt results
|
||||
drawdown_metrics.append(
|
||||
('Max % of account underwater', f"{strat_results['max_relative_drawdown']:.2%}")
|
||||
)
|
||||
drawdown_metrics.extend([
|
||||
('Absolute Drawdown (Account)', f"{strat_results['max_drawdown_account']:.2%}")
|
||||
if 'max_drawdown_account' in strat_results else (
|
||||
'Drawdown', f"{strat_results['max_drawdown']:.2%}"),
|
||||
('Absolute Drawdown', round_coin_value(strat_results['max_drawdown_abs'],
|
||||
strat_results['stake_currency'])),
|
||||
('Drawdown high', round_coin_value(strat_results['max_drawdown_high'],
|
||||
strat_results['stake_currency'])),
|
||||
('Drawdown low', round_coin_value(strat_results['max_drawdown_low'],
|
||||
strat_results['stake_currency'])),
|
||||
('Drawdown Start', strat_results['drawdown_start']),
|
||||
('Drawdown End', strat_results['drawdown_end']),
|
||||
])
|
||||
|
||||
entry_adjustment_metrics = [
|
||||
('Canceled Trade Entries', strat_results.get('canceled_trade_entries', 'N/A')),
|
||||
('Canceled Entry Orders', strat_results.get('canceled_entry_orders', 'N/A')),
|
||||
('Replaced Entry Orders', strat_results.get('replaced_entry_orders', 'N/A')),
|
||||
] if strat_results.get('canceled_entry_orders', 0) > 0 else []
|
||||
|
||||
# Newly added fields should be ignored if they are missing in strat_results. hyperopt-show
|
||||
# command stores these results and newer version of freqtrade must be able to handle old
|
||||
# results with missing new fields.
|
||||
metrics = [
|
||||
('Backtesting from', strat_results['backtest_start']),
|
||||
('Backtesting to', strat_results['backtest_end']),
|
||||
('Max open trades', strat_results['max_open_trades']),
|
||||
('', ''), # Empty line to improve readability
|
||||
('Total/Daily Avg Trades',
|
||||
f"{strat_results['total_trades']} / {strat_results['trades_per_day']}"),
|
||||
|
||||
('Starting balance', round_coin_value(strat_results['starting_balance'],
|
||||
strat_results['stake_currency'])),
|
||||
('Final balance', round_coin_value(strat_results['final_balance'],
|
||||
strat_results['stake_currency'])),
|
||||
('Absolute profit ', round_coin_value(strat_results['profit_total_abs'],
|
||||
strat_results['stake_currency'])),
|
||||
('Total profit %', f"{strat_results['profit_total']:.2%}"),
|
||||
('CAGR %', f"{strat_results['cagr']:.2%}" if 'cagr' in strat_results else 'N/A'),
|
||||
('Sortino', f"{strat_results['sortino']:.2f}" if 'sortino' in strat_results else 'N/A'),
|
||||
('Sharpe', f"{strat_results['sharpe']:.2f}" if 'sharpe' in strat_results else 'N/A'),
|
||||
('Calmar', f"{strat_results['calmar']:.2f}" if 'calmar' in strat_results else 'N/A'),
|
||||
('Profit factor', f'{strat_results["profit_factor"]:.2f}' if 'profit_factor'
|
||||
in strat_results else 'N/A'),
|
||||
('Expectancy (Ratio)', (
|
||||
f"{strat_results['expectancy']:.2f} ({strat_results['expectancy_ratio']:.2f})" if
|
||||
'expectancy_ratio' in strat_results else 'N/A')),
|
||||
('Trades per day', strat_results['trades_per_day']),
|
||||
('Avg. daily profit %',
|
||||
f"{(strat_results['profit_total'] / strat_results['backtest_days']):.2%}"),
|
||||
('Avg. stake amount', round_coin_value(strat_results['avg_stake_amount'],
|
||||
strat_results['stake_currency'])),
|
||||
('Total trade volume', round_coin_value(strat_results['total_volume'],
|
||||
strat_results['stake_currency'])),
|
||||
*short_metrics,
|
||||
('', ''), # Empty line to improve readability
|
||||
('Best Pair', f"{strat_results['best_pair']['key']} "
|
||||
f"{strat_results['best_pair']['profit_sum']:.2%}"),
|
||||
('Worst Pair', f"{strat_results['worst_pair']['key']} "
|
||||
f"{strat_results['worst_pair']['profit_sum']:.2%}"),
|
||||
('Best trade', f"{best_trade['pair']} {best_trade['profit_ratio']:.2%}"),
|
||||
('Worst trade', f"{worst_trade['pair']} "
|
||||
f"{worst_trade['profit_ratio']:.2%}"),
|
||||
|
||||
('Best day', round_coin_value(strat_results['backtest_best_day_abs'],
|
||||
strat_results['stake_currency'])),
|
||||
('Worst day', round_coin_value(strat_results['backtest_worst_day_abs'],
|
||||
strat_results['stake_currency'])),
|
||||
('Days win/draw/lose', f"{strat_results['winning_days']} / "
|
||||
f"{strat_results['draw_days']} / {strat_results['losing_days']}"),
|
||||
('Avg. Duration Winners', f"{strat_results['winner_holding_avg']}"),
|
||||
('Avg. Duration Loser', f"{strat_results['loser_holding_avg']}"),
|
||||
('Max Consecutive Wins / Loss',
|
||||
f"{strat_results['max_consecutive_wins']} / {strat_results['max_consecutive_losses']}"
|
||||
if 'max_consecutive_losses' in strat_results else 'N/A'),
|
||||
('Rejected Entry signals', strat_results.get('rejected_signals', 'N/A')),
|
||||
('Entry/Exit Timeouts',
|
||||
f"{strat_results.get('timedout_entry_orders', 'N/A')} / "
|
||||
f"{strat_results.get('timedout_exit_orders', 'N/A')}"),
|
||||
*entry_adjustment_metrics,
|
||||
('', ''), # Empty line to improve readability
|
||||
|
||||
('Min balance', round_coin_value(strat_results['csum_min'],
|
||||
strat_results['stake_currency'])),
|
||||
('Max balance', round_coin_value(strat_results['csum_max'],
|
||||
strat_results['stake_currency'])),
|
||||
|
||||
*drawdown_metrics,
|
||||
('Market change', f"{strat_results['market_change']:.2%}"),
|
||||
]
|
||||
|
||||
return tabulate(metrics, headers=["Metric", "Value"], tablefmt="orgtbl")
|
||||
else:
|
||||
start_balance = round_coin_value(strat_results['starting_balance'],
|
||||
strat_results['stake_currency'])
|
||||
stake_amount = round_coin_value(
|
||||
strat_results['stake_amount'], strat_results['stake_currency']
|
||||
) if strat_results['stake_amount'] != UNLIMITED_STAKE_AMOUNT else 'unlimited'
|
||||
|
||||
message = ("No trades made. "
|
||||
f"Your starting balance was {start_balance}, "
|
||||
f"and your stake was {stake_amount}."
|
||||
)
|
||||
return message
|
||||
|
||||
|
||||
def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency: str,
|
||||
backtest_breakdown=[]):
|
||||
"""
|
||||
Print results for one strategy
|
||||
"""
|
||||
# Print results
|
||||
print(f"Result for strategy {strategy}")
|
||||
table = text_table_bt_results(results['results_per_pair'], stake_currency=stake_currency)
|
||||
if isinstance(table, str):
|
||||
print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '='))
|
||||
print(table)
|
||||
|
||||
table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency)
|
||||
if isinstance(table, str) and len(table) > 0:
|
||||
print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '='))
|
||||
print(table)
|
||||
|
||||
if (results.get('results_per_enter_tag') is not None
|
||||
or results.get('results_per_buy_tag') is not None):
|
||||
# results_per_buy_tag is deprecated and should be removed 2 versions after short golive.
|
||||
table = text_table_tags(
|
||||
"enter_tag",
|
||||
results.get('results_per_enter_tag', results.get('results_per_buy_tag')),
|
||||
stake_currency=stake_currency)
|
||||
|
||||
if isinstance(table, str) and len(table) > 0:
|
||||
print(' ENTER TAG STATS '.center(len(table.splitlines()[0]), '='))
|
||||
print(table)
|
||||
|
||||
exit_reasons = results.get('exit_reason_summary', results.get('sell_reason_summary'))
|
||||
table = text_table_exit_reason(exit_reason_stats=exit_reasons,
|
||||
stake_currency=stake_currency)
|
||||
if isinstance(table, str) and len(table) > 0:
|
||||
print(' EXIT REASON STATS '.center(len(table.splitlines()[0]), '='))
|
||||
print(table)
|
||||
|
||||
for period in backtest_breakdown:
|
||||
if period in results.get('periodic_breakdown', {}):
|
||||
days_breakdown_stats = results['periodic_breakdown'][period]
|
||||
else:
|
||||
days_breakdown_stats = generate_periodic_breakdown_stats(
|
||||
trade_list=results['trades'], period=period)
|
||||
table = text_table_periodic_breakdown(days_breakdown_stats=days_breakdown_stats,
|
||||
stake_currency=stake_currency, period=period)
|
||||
if isinstance(table, str) and len(table) > 0:
|
||||
print(f' {period.upper()} BREAKDOWN '.center(len(table.splitlines()[0]), '='))
|
||||
print(table)
|
||||
|
||||
table = text_table_add_metrics(results)
|
||||
if isinstance(table, str) and len(table) > 0:
|
||||
print(' SUMMARY METRICS '.center(len(table.splitlines()[0]), '='))
|
||||
print(table)
|
||||
|
||||
if isinstance(table, str) and len(table) > 0:
|
||||
print('=' * len(table.splitlines()[0]))
|
||||
|
||||
print()


def show_backtest_results(config: Config, backtest_stats: BacktestResultType):
    stake_currency = config['stake_currency']

    for strategy, results in backtest_stats['strategy'].items():
        show_backtest_result(
            strategy, results, stake_currency,
            config.get('backtest_breakdown', []))

    if len(backtest_stats['strategy']) > 0:
        # Print Strategy summary table

        table = text_table_strategy(backtest_stats['strategy_comparison'], stake_currency)
        print(f"Backtested {results['backtest_start']} -> {results['backtest_end']} |"
              f" Max open trades : {results['max_open_trades']}")
        print(' STRATEGY SUMMARY '.center(len(table.splitlines()[0]), '='))
        print(table)
        print('=' * len(table.splitlines()[0]))
        print('\nFor more details, please look at the detail tables above')


def show_sorted_pairlist(config: Config, backtest_stats: BacktestResultType):
    if config.get('backtest_show_pair_list', False):
        for strategy, results in backtest_stats['strategy'].items():
            print(f"Pairs for Strategy {strategy}: \n[")
            for result in results['results_per_pair']:
                if result["key"] != 'TOTAL':
                    print(f'"{result["key"]}", // {result["profit_mean"]:.2%}')
            print("]")


def generate_edge_table(results: dict) -> str:
    floatfmt = ('s', '.10g', '.2f', '.2f', '.2f', '.2f', 'd', 'd', 'd')
    tabular_data = []
    headers = ['Pair', 'Stoploss', 'Win Rate', 'Risk Reward Ratio',
               'Required Risk Reward', 'Expectancy', 'Total Number of Trades',
               'Average Duration (min)']

    for result in results.items():
        if result[1].nb_trades > 0:
            tabular_data.append([
                result[0],
                result[1].stoploss,
                result[1].winrate,
                result[1].risk_reward_ratio,
                result[1].required_risk_reward,
                result[1].expectancy,
                result[1].nb_trades,
                round(result[1].avg_trade_duration)
            ])

    # Ignore type as floatfmt does allow tuples but mypy does not know that
    return tabulate(tabular_data, headers=headers,
                    floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
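
generate_edge_table leans on tabulate's per-column floatfmt tuples. A minimal, self-contained sketch of that tabulate feature (data is illustrative):

    from tabulate import tabulate

    rows = [['ETH/USDT', 0.6543, 12], ['BTC/USDT', 0.7, 3]]
    print(tabulate(rows, headers=['Pair', 'Win Rate', 'Trades'],
                   floatfmt=('s', '.2f', 'd'), tablefmt='orgtbl', stralign='right'))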
74
freqtrade/optimize/optimize_reports/bt_storage.py
Normal file
@@ -0,0 +1,74 @@
import logging
from pathlib import Path
from typing import Dict

from freqtrade.constants import LAST_BT_RESULT_FN
from freqtrade.misc import file_dump_joblib, file_dump_json
from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename
from freqtrade.types import BacktestResultType


logger = logging.getLogger(__name__)


def store_backtest_stats(
        recordfilename: Path, stats: BacktestResultType, dtappendix: str) -> None:
    """
    Stores backtest results
    :param recordfilename: Path object, which can either be a filename or a directory.
        Filenames will be appended with a timestamp right before the suffix
        while for directories, <directory>/backtest-result-<datetime>.json will be used as filename
    :param stats: Dict containing the backtesting statistics
    :param dtappendix: Datetime to use for the filename
    """
    if recordfilename.is_dir():
        filename = (recordfilename / f'backtest-result-{dtappendix}.json')
    else:
        filename = Path.joinpath(
            recordfilename.parent, f'{recordfilename.stem}-{dtappendix}'
        ).with_suffix(recordfilename.suffix)

    # Store metadata separately.
    file_dump_json(get_backtest_metadata_filename(filename), stats['metadata'])
    # Don't mutate the original stats dict.
    stats_copy = {
        'strategy': stats['strategy'],
        'strategy_comparison': stats['strategy_comparison'],
    }

    file_dump_json(filename, stats_copy)

    latest_filename = Path.joinpath(filename.parent, LAST_BT_RESULT_FN)
    file_dump_json(latest_filename, {'latest_backtest': str(filename.name)})


def _store_backtest_analysis_data(
        recordfilename: Path, data: Dict[str, Dict],
        dtappendix: str, name: str) -> Path:
    """
    Stores backtest trade candles for analysis
    :param recordfilename: Path object, which can either be a filename or a directory.
        Filenames will be appended with a timestamp right before the suffix
        while for directories, <directory>/backtest-result-<datetime>_<name>.pkl will be used
        as filename
    :param data: Dict containing the backtesting data for analysis
    :param dtappendix: Datetime to use for the filename
    :param name: Name to use for the file, e.g. signals, rejected
    """
    if recordfilename.is_dir():
        filename = (recordfilename / f'backtest-result-{dtappendix}_{name}.pkl')
    else:
        filename = Path.joinpath(
            recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_{name}.pkl'
        )

    file_dump_joblib(filename, data)

    return filename


def store_backtest_analysis_results(
        recordfilename: Path, candles: Dict[str, Dict], trades: Dict[str, Dict],
        dtappendix: str) -> None:
    _store_backtest_analysis_data(recordfilename, candles, dtappendix, "signals")
    _store_backtest_analysis_data(recordfilename, trades, dtappendix, "rejected")
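
A minimal usage sketch of the storage helpers above; the paths and timestamp are illustrative, and `stats` is assumed to be a populated BacktestResultType dict:

    from pathlib import Path

    # Directory target: writes <dir>/backtest-result-<dtappendix>.json
    store_backtest_stats(Path('user_data/backtest_results'), stats, '2023-07-19_12-00-00')

    # Filename target: the timestamp is spliced in before the suffix, so
    # 'my-run.json' becomes 'my-run-2023-07-19_12-00-00.json'
    store_backtest_stats(Path('user_data/my-run.json'), stats, '2023-07-19_12-00-00')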

@@ -1,83 +1,22 @@
import logging
from copy import deepcopy
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Dict, List, Union
from typing import Any, Dict, List, Tuple, Union

from pandas import DataFrame, concat, to_datetime
from tabulate import tabulate
import numpy as np
from pandas import DataFrame, Series, concat, to_datetime

from freqtrade.constants import (BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT, LAST_BT_RESULT_FN,
                                 UNLIMITED_STAKE_AMOUNT, Config, IntOrInf)
from freqtrade.constants import BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT, IntOrInf
from freqtrade.data.metrics import (calculate_cagr, calculate_calmar, calculate_csum,
                                    calculate_expectancy, calculate_market_change,
                                    calculate_max_drawdown, calculate_sharpe, calculate_sortino)
from freqtrade.misc import decimals_per_coin, file_dump_joblib, file_dump_json, round_coin_value
from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename
from freqtrade.misc import decimals_per_coin, round_coin_value
from freqtrade.types import BacktestResultType


logger = logging.getLogger(__name__)


def store_backtest_stats(
        recordfilename: Path, stats: Dict[str, DataFrame], dtappendix: str) -> None:
    """
    Stores backtest results
    :param recordfilename: Path object, which can either be a filename or a directory.
        Filenames will be appended with a timestamp right before the suffix
        while for directories, <directory>/backtest-result-<datetime>.json will be used as filename
    :param stats: Dataframe containing the backtesting statistics
    :param dtappendix: Datetime to use for the filename
    """
    if recordfilename.is_dir():
        filename = (recordfilename / f'backtest-result-{dtappendix}.json')
    else:
        filename = Path.joinpath(
            recordfilename.parent, f'{recordfilename.stem}-{dtappendix}'
        ).with_suffix(recordfilename.suffix)

    # Store metadata separately.
    file_dump_json(get_backtest_metadata_filename(filename), stats['metadata'])
    del stats['metadata']

    file_dump_json(filename, stats)

    latest_filename = Path.joinpath(filename.parent, LAST_BT_RESULT_FN)
    file_dump_json(latest_filename, {'latest_backtest': str(filename.name)})


def _store_backtest_analysis_data(
        recordfilename: Path, data: Dict[str, Dict],
        dtappendix: str, name: str) -> Path:
    """
    Stores backtest trade candles for analysis
    :param recordfilename: Path object, which can either be a filename or a directory.
        Filenames will be appended with a timestamp right before the suffix
        while for directories, <directory>/backtest-result-<datetime>_<name>.pkl will be used
        as filename
    :param data: Dict containing the backtesting data for analysis
    :param dtappendix: Datetime to use for the filename
    :param name: Name to use for the file, e.g. signals, rejected
    """
    if recordfilename.is_dir():
        filename = (recordfilename / f'backtest-result-{dtappendix}_{name}.pkl')
    else:
        filename = Path.joinpath(
            recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_{name}.pkl'
        )

    file_dump_joblib(filename, data)

    return filename


def store_backtest_analysis_results(
        recordfilename: Path, candles: Dict[str, Dict], trades: Dict[str, Dict],
        dtappendix: str) -> None:
    _store_backtest_analysis_data(recordfilename, candles, dtappendix, "signals")
    _store_backtest_analysis_data(recordfilename, trades, dtappendix, "rejected")


def generate_trade_signal_candles(preprocessed_df: Dict[str, DataFrame],
                                  bt_results: Dict[str, Any]) -> DataFrame:
    signal_candles_only = {}

@@ -120,34 +59,6 @@ def generate_rejected_signals(preprocessed_df: Dict[str, DataFrame],
    return rejected_candles_only


def _get_line_floatfmt(stake_currency: str) -> List[str]:
    """
    Generate floatformat (goes in line with _generate_result_line())
    """
    return ['s', 'd', '.2f', '.2f', f'.{decimals_per_coin(stake_currency)}f',
            '.2f', 'd', 's', 's']


def _get_line_header(first_column: str, stake_currency: str,
                     direction: str = 'Entries') -> List[str]:
    """
    Generate header lines (goes in line with _generate_result_line())
    """
    return [first_column, direction, 'Avg Profit %', 'Cum Profit %',
            f'Tot Profit {stake_currency}', 'Tot Profit %', 'Avg Duration',
            'Win Draw Loss Win%']


def generate_wins_draws_losses(wins, draws, losses):
    if wins > 0 and losses == 0:
        wl_ratio = '100'
    elif wins == 0:
        wl_ratio = '0'
    else:
        wl_ratio = f'{100.0 / (wins + draws + losses) * wins:.1f}' if losses > 0 else '100'
    return f'{wins:>4} {draws:>4} {losses:>4} {wl_ratio:>4}'
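
A worked example of the helper above (illustrative counts): draws count towards the denominator, so 7 wins, 1 draw and 2 losses give 100 * 7 / 10 = 70.0, while the wins-without-losses branch pins the ratio to '100'.

    generate_wins_draws_losses(7, 1, 2)  # '   7    1    2 70.0' (spacing per the format string)
    generate_wins_draws_losses(5, 0, 0)  # no losses -> ' 100'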


def _generate_result_line(result: DataFrame, starting_balance: int, first_column: str) -> Dict:
    """
    Generate one result dict, with "first_column" as key.

@@ -178,6 +89,7 @@ def _generate_result_line(result: DataFrame, starting_balance: int, first_column
        'wins': len(result[result['profit_abs'] > 0]),
        'draws': len(result[result['profit_abs'] == 0]),
        'losses': len(result[result['profit_abs'] < 0]),
        'winrate': len(result[result['profit_abs'] > 0]) / len(result) if len(result) else 0.0,
    }


@@ -265,6 +177,7 @@ def generate_exit_reason_stats(max_open_trades: IntOrInf, results: DataFrame) ->
            'wins': len(result[result['profit_abs'] > 0]),
            'draws': len(result[result['profit_abs'] == 0]),
            'losses': len(result[result['profit_abs'] < 0]),
            'winrate': len(result[result['profit_abs'] > 0]) / count if count else 0.0,
            'profit_mean': profit_mean,
            'profit_mean_pct': round(profit_mean * 100, 2),
            'profit_sum': profit_sum,

@@ -295,31 +208,6 @@ def generate_strategy_comparison(bt_stats: Dict) -> List[Dict]:
    return tabular_data


def generate_edge_table(results: dict) -> str:
    floatfmt = ('s', '.10g', '.2f', '.2f', '.2f', '.2f', 'd', 'd', 'd')
    tabular_data = []
    headers = ['Pair', 'Stoploss', 'Win Rate', 'Risk Reward Ratio',
               'Required Risk Reward', 'Expectancy', 'Total Number of Trades',
               'Average Duration (min)']

    for result in results.items():
        if result[1].nb_trades > 0:
            tabular_data.append([
                result[0],
                result[1].stoploss,
                result[1].winrate,
                result[1].risk_reward_ratio,
                result[1].required_risk_reward,
                result[1].expectancy,
                result[1].nb_trades,
                round(result[1].avg_trade_duration)
            ])

    # Ignore type as floatfmt does allow tuples but mypy does not know that
    return tabulate(tabular_data, headers=headers,
                    floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")


def _get_resample_from_period(period: str) -> str:
    if period == 'day':
        return '1d'

@@ -344,6 +232,7 @@ def generate_periodic_breakdown_stats(trade_list: List, period: str) -> List[Dic
        wins = sum(day['profit_abs'] > 0)
        draws = sum(day['profit_abs'] == 0)
        loses = sum(day['profit_abs'] < 0)
        trades = (wins + draws + loses)
        stats.append(
            {
                'date': name.strftime('%d/%m/%Y'),

@@ -351,7 +240,8 @@ def generate_periodic_breakdown_stats(trade_list: List, period: str) -> List[Dic
                'profit_abs': profit_abs,
                'wins': wins,
                'draws': draws,
                'loses': loses
                'loses': loses,
                'winrate': wins / trades if trades else 0.0,
            }
        )
    return stats

@@ -364,6 +254,23 @@ def generate_all_periodic_breakdown_stats(trade_list: List) -> Dict[str, List]:
    return result


def calc_streak(dataframe: DataFrame) -> Tuple[int, int]:
    """
    Calculate consecutive win and loss streaks
    :param dataframe: Dataframe containing the trades dataframe, with profit_ratio column
    :return: Tuple containing consecutive wins and losses
    """

    df = Series(np.where(dataframe['profit_ratio'] > 0, 'win', 'loss')).to_frame('result')
    df['streaks'] = df['result'].ne(df['result'].shift()).cumsum().rename('streaks')
    df['counter'] = df['streaks'].groupby(df['streaks']).cumcount() + 1
    res = df.groupby(df['result']).max()
    #
    cons_wins = int(res.loc['win', 'counter']) if 'win' in res.index else 0
    cons_losses = int(res.loc['loss', 'counter']) if 'loss' in res.index else 0
    return cons_wins, cons_losses
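
A worked example for calc_streak (illustrative data): a profit_ratio sequence of +, +, -, +, +, +, - yields a longest win streak of 3 and a longest loss streak of 1.

    import pandas as pd

    trades = pd.DataFrame({'profit_ratio': [0.01, 0.02, -0.01, 0.03, 0.01, 0.02, -0.02]})
    assert calc_streak(trades) == (3, 1)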


def generate_trading_stats(results: DataFrame) -> Dict[str, Any]:
    """ Generate overall trade statistics """
    if len(results) == 0:

@@ -371,9 +278,12 @@ def generate_trading_stats(results: DataFrame) -> Dict[str, Any]:
            'wins': 0,
            'losses': 0,
            'draws': 0,
            'winrate': 0,
            'holding_avg': timedelta(),
            'winner_holding_avg': timedelta(),
            'loser_holding_avg': timedelta(),
            'max_consecutive_wins': 0,
            'max_consecutive_losses': 0,
        }

    winning_trades = results.loc[results['profit_ratio'] > 0]

@@ -386,17 +296,21 @@ def generate_trading_stats(results: DataFrame) -> Dict[str, Any]:
                          if not winning_trades.empty else timedelta())
    loser_holding_avg = (timedelta(minutes=round(losing_trades['trade_duration'].mean()))
                         if not losing_trades.empty else timedelta())
    winstreak, loss_streak = calc_streak(results)

    return {
        'wins': len(winning_trades),
        'losses': len(losing_trades),
        'draws': len(draw_trades),
        'winrate': len(winning_trades) / len(results) if len(results) else 0.0,
        'holding_avg': holding_avg,
        'holding_avg_s': holding_avg.total_seconds(),
        'winner_holding_avg': winner_holding_avg,
        'winner_holding_avg_s': winner_holding_avg.total_seconds(),
        'loser_holding_avg': loser_holding_avg,
        'loser_holding_avg_s': loser_holding_avg.total_seconds(),
        'max_consecutive_wins': winstreak,
        'max_consecutive_losses': loss_streak,
    }


@@ -489,6 +403,7 @@ def generate_strategy_stats(pairlist: List[str],
    losing_profit = results.loc[results['profit_abs'] < 0, 'profit_abs'].sum()
    profit_factor = winning_profit / abs(losing_profit) if losing_profit else 0.0

    expectancy, expectancy_ratio = calculate_expectancy(results)
    backtest_days = (max_date - min_date).days or 1
    strat_stats = {
        'trades': results.to_dict(orient='records'),

@@ -514,7 +429,8 @@ def generate_strategy_stats(pairlist: List[str],
        'profit_total_long_abs': results.loc[~results['is_short'], 'profit_abs'].sum(),
        'profit_total_short_abs': results.loc[results['is_short'], 'profit_abs'].sum(),
        'cagr': calculate_cagr(backtest_days, start_balance, content['final_balance']),
        'expectancy': calculate_expectancy(results),
        'expectancy': expectancy,
        'expectancy_ratio': expectancy_ratio,
        'sortino': calculate_sortino(results, min_date, max_date, start_balance),
        'sharpe': calculate_sharpe(results, min_date, max_date, start_balance),
        'calmar': calculate_calmar(results, min_date, max_date, start_balance),
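
For orientation, a hedged sketch of the usual expectancy definitions; the actual calculate_expectancy implementation in freqtrade.data.metrics may differ in detail:

    def expectancy_sketch(avg_win: float, avg_loss: float, winrate: float):
        loserate = 1 - winrate
        expectancy = winrate * avg_win - loserate * avg_loss      # expected profit per trade
        risk_reward = avg_win / avg_loss if avg_loss else 0.0
        expectancy_ratio = winrate * risk_reward - loserate       # unitless variant
        return expectancy, expectancy_ratio

    # e.g. 55% winrate, avg win 20, avg loss 10 -> (6.5, 0.65)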

@@ -620,7 +536,7 @@ def generate_strategy_stats(pairlist: List[str],
def generate_backtest_stats(btdata: Dict[str, DataFrame],
                            all_results: Dict[str, Dict[str, Union[DataFrame, Dict]]],
                            min_date: datetime, max_date: datetime
                            ) -> Dict[str, Any]:
                            ) -> BacktestResultType:
    """
    :param btdata: Backtest data
    :param all_results: backtest result - dictionary in the form:

@@ -629,7 +545,7 @@ def generate_backtest_stats(btdata: Dict[str, DataFrame],
    :param max_date: Backtest end date
    :return: Dictionary containing results per strategy and a strategy summary.
    """
    result: Dict[str, Any] = {
    result: BacktestResultType = {
        'metadata': {},
        'strategy': {},
        'strategy_comparison': [],

@@ -652,357 +568,3 @@ def generate_backtest_stats(btdata: Dict[str, DataFrame],
    result['strategy_comparison'] = strategy_results

    return result


###
# Start output section
###

def text_table_bt_results(pair_results: List[Dict[str, Any]], stake_currency: str) -> str:
    """
    Generates and returns a text table for the given backtest data and the results dataframe
    :param pair_results: List of Dictionaries - one entry per pair + final TOTAL row
    :param stake_currency: stake-currency - used to correctly name headers
    :return: pretty printed table with tabulate as string
    """

    headers = _get_line_header('Pair', stake_currency)
    floatfmt = _get_line_floatfmt(stake_currency)
    output = [[
        t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
        t['profit_total_pct'], t['duration_avg'],
        generate_wins_draws_losses(t['wins'], t['draws'], t['losses'])
    ] for t in pair_results]
    # Ignore type as floatfmt does allow tuples but mypy does not know that
    return tabulate(output, headers=headers,
                    floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")


def text_table_exit_reason(exit_reason_stats: List[Dict[str, Any]], stake_currency: str) -> str:
    """
    Generate small table outlining Backtest results
    :param exit_reason_stats: Exit reason metrics
    :param stake_currency: Stakecurrency used
    :return: pretty printed table with tabulate as string
    """
    headers = [
        'Exit Reason',
        'Exits',
        'Win Draws Loss Win%',
        'Avg Profit %',
        'Cum Profit %',
        f'Tot Profit {stake_currency}',
        'Tot Profit %',
    ]

    output = [[
        t.get('exit_reason', t.get('sell_reason')), t['trades'],
        generate_wins_draws_losses(t['wins'], t['draws'], t['losses']),
        t['profit_mean_pct'], t['profit_sum_pct'],
        round_coin_value(t['profit_total_abs'], stake_currency, False),
        t['profit_total_pct'],
    ] for t in exit_reason_stats]
    return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")


def text_table_tags(tag_type: str, tag_results: List[Dict[str, Any]], stake_currency: str) -> str:
    """
    Generates and returns a text table for the given backtest data and the results dataframe
    :param tag_results: List of Dictionaries - one entry per pair + final TOTAL row
    :param stake_currency: stake-currency - used to correctly name headers
    :return: pretty printed table with tabulate as string
    """
    if (tag_type == "enter_tag"):
        headers = _get_line_header("TAG", stake_currency)
    else:
        headers = _get_line_header("TAG", stake_currency, 'Exits')
    floatfmt = _get_line_floatfmt(stake_currency)
    output = [
        [
            t['key'] if t['key'] is not None and len(
                t['key']) > 0 else "OTHER",
            t['trades'],
            t['profit_mean_pct'],
            t['profit_sum_pct'],
            t['profit_total_abs'],
            t['profit_total_pct'],
            t['duration_avg'],
            generate_wins_draws_losses(
                t['wins'],
                t['draws'],
                t['losses'])] for t in tag_results]
    # Ignore type as floatfmt does allow tuples but mypy does not know that
    return tabulate(output, headers=headers,
                    floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")


def text_table_periodic_breakdown(days_breakdown_stats: List[Dict[str, Any]],
                                  stake_currency: str, period: str) -> str:
    """
    Generate small table with Backtest results by days
    :param days_breakdown_stats: Days breakdown metrics
    :param stake_currency: Stakecurrency used
    :return: pretty printed table with tabulate as string
    """
    headers = [
        period.capitalize(),
        f'Tot Profit {stake_currency}',
        'Wins',
        'Draws',
        'Losses',
    ]
    output = [[
        d['date'], round_coin_value(d['profit_abs'], stake_currency, False),
        d['wins'], d['draws'], d['loses'],
    ] for d in days_breakdown_stats]
    return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")


def text_table_strategy(strategy_results, stake_currency: str) -> str:
    """
    Generate summary table per strategy
    :param strategy_results: Dict of <Strategyname: DataFrame> containing results for all strategies
    :param stake_currency: stake-currency - used to correctly name headers
    :return: pretty printed table with tabulate as string
    """
    floatfmt = _get_line_floatfmt(stake_currency)
    headers = _get_line_header('Strategy', stake_currency)
    # _get_line_header() is also used for per-pair summary. Per-pair drawdown is mostly useless
    # therefore we slip this column in only for strategy summary here.
    headers.append('Drawdown')

    # Align drawdown string on the center two space separator.
    if 'max_drawdown_account' in strategy_results[0]:
        drawdown = [f'{t["max_drawdown_account"] * 100:.2f}' for t in strategy_results]
    else:
        # Support for prior backtest results
        drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results]

    dd_pad_abs = max([len(t['max_drawdown_abs']) for t in strategy_results])
    dd_pad_per = max([len(dd) for dd in drawdown])
    drawdown = [f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%'
                for t, dd in zip(strategy_results, drawdown)]

    output = [[
        t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
        t['profit_total_pct'], t['duration_avg'],
        generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), drawdown]
        for t, drawdown in zip(strategy_results, drawdown)]
    # Ignore type as floatfmt does allow tuples but mypy does not know that
    return tabulate(output, headers=headers,
                    floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")


def text_table_add_metrics(strat_results: Dict) -> str:
    if len(strat_results['trades']) > 0:
        best_trade = max(strat_results['trades'], key=lambda x: x['profit_ratio'])
        worst_trade = min(strat_results['trades'], key=lambda x: x['profit_ratio'])

        short_metrics = [
            ('', ''),  # Empty line to improve readability
            ('Long / Short',
             f"{strat_results.get('trade_count_long', 'total_trades')} / "
             f"{strat_results.get('trade_count_short', 0)}"),
            ('Total profit Long %', f"{strat_results['profit_total_long']:.2%}"),
            ('Total profit Short %', f"{strat_results['profit_total_short']:.2%}"),
            ('Absolute profit Long', round_coin_value(strat_results['profit_total_long_abs'],
                                                      strat_results['stake_currency'])),
            ('Absolute profit Short', round_coin_value(strat_results['profit_total_short_abs'],
                                                       strat_results['stake_currency'])),
        ] if strat_results.get('trade_count_short', 0) > 0 else []

        drawdown_metrics = []
        if 'max_relative_drawdown' in strat_results:
            # Compatibility to show old hyperopt results
            drawdown_metrics.append(
                ('Max % of account underwater', f"{strat_results['max_relative_drawdown']:.2%}")
            )
        drawdown_metrics.extend([
            ('Absolute Drawdown (Account)', f"{strat_results['max_drawdown_account']:.2%}")
            if 'max_drawdown_account' in strat_results else (
                'Drawdown', f"{strat_results['max_drawdown']:.2%}"),
            ('Absolute Drawdown', round_coin_value(strat_results['max_drawdown_abs'],
                                                   strat_results['stake_currency'])),
            ('Drawdown high', round_coin_value(strat_results['max_drawdown_high'],
                                               strat_results['stake_currency'])),
            ('Drawdown low', round_coin_value(strat_results['max_drawdown_low'],
                                              strat_results['stake_currency'])),
            ('Drawdown Start', strat_results['drawdown_start']),
            ('Drawdown End', strat_results['drawdown_end']),
        ])

        entry_adjustment_metrics = [
            ('Canceled Trade Entries', strat_results.get('canceled_trade_entries', 'N/A')),
            ('Canceled Entry Orders', strat_results.get('canceled_entry_orders', 'N/A')),
            ('Replaced Entry Orders', strat_results.get('replaced_entry_orders', 'N/A')),
        ] if strat_results.get('canceled_entry_orders', 0) > 0 else []

        # Newly added fields should be ignored if they are missing in strat_results. hyperopt-show
        # command stores these results and newer version of freqtrade must be able to handle old
        # results with missing new fields.
        metrics = [
            ('Backtesting from', strat_results['backtest_start']),
            ('Backtesting to', strat_results['backtest_end']),
            ('Max open trades', strat_results['max_open_trades']),
            ('', ''),  # Empty line to improve readability
            ('Total/Daily Avg Trades',
             f"{strat_results['total_trades']} / {strat_results['trades_per_day']}"),

            ('Starting balance', round_coin_value(strat_results['starting_balance'],
                                                  strat_results['stake_currency'])),
            ('Final balance', round_coin_value(strat_results['final_balance'],
                                               strat_results['stake_currency'])),
            ('Absolute profit ', round_coin_value(strat_results['profit_total_abs'],
                                                  strat_results['stake_currency'])),
            ('Total profit %', f"{strat_results['profit_total']:.2%}"),
            ('CAGR %', f"{strat_results['cagr']:.2%}" if 'cagr' in strat_results else 'N/A'),
            ('Sortino', f"{strat_results['sortino']:.2f}" if 'sortino' in strat_results else 'N/A'),
            ('Sharpe', f"{strat_results['sharpe']:.2f}" if 'sharpe' in strat_results else 'N/A'),
            ('Calmar', f"{strat_results['calmar']:.2f}" if 'calmar' in strat_results else 'N/A'),
            ('Profit factor', f'{strat_results["profit_factor"]:.2f}' if 'profit_factor'
             in strat_results else 'N/A'),
            ('Expectancy', f"{strat_results['expectancy']:.2f}" if 'expectancy'
             in strat_results else 'N/A'),
            ('Trades per day', strat_results['trades_per_day']),
            ('Avg. daily profit %',
             f"{(strat_results['profit_total'] / strat_results['backtest_days']):.2%}"),
            ('Avg. stake amount', round_coin_value(strat_results['avg_stake_amount'],
                                                   strat_results['stake_currency'])),
            ('Total trade volume', round_coin_value(strat_results['total_volume'],
                                                    strat_results['stake_currency'])),
            *short_metrics,
            ('', ''),  # Empty line to improve readability
            ('Best Pair', f"{strat_results['best_pair']['key']} "
                          f"{strat_results['best_pair']['profit_sum']:.2%}"),
            ('Worst Pair', f"{strat_results['worst_pair']['key']} "
                           f"{strat_results['worst_pair']['profit_sum']:.2%}"),
            ('Best trade', f"{best_trade['pair']} {best_trade['profit_ratio']:.2%}"),
            ('Worst trade', f"{worst_trade['pair']} "
                            f"{worst_trade['profit_ratio']:.2%}"),

            ('Best day', round_coin_value(strat_results['backtest_best_day_abs'],
                                          strat_results['stake_currency'])),
            ('Worst day', round_coin_value(strat_results['backtest_worst_day_abs'],
                                           strat_results['stake_currency'])),
            ('Days win/draw/lose', f"{strat_results['winning_days']} / "
                                   f"{strat_results['draw_days']} / {strat_results['losing_days']}"),
            ('Avg. Duration Winners', f"{strat_results['winner_holding_avg']}"),
            ('Avg. Duration Loser', f"{strat_results['loser_holding_avg']}"),
            ('Rejected Entry signals', strat_results.get('rejected_signals', 'N/A')),
            ('Entry/Exit Timeouts',
             f"{strat_results.get('timedout_entry_orders', 'N/A')} / "
             f"{strat_results.get('timedout_exit_orders', 'N/A')}"),
            *entry_adjustment_metrics,
            ('', ''),  # Empty line to improve readability

            ('Min balance', round_coin_value(strat_results['csum_min'],
                                             strat_results['stake_currency'])),
            ('Max balance', round_coin_value(strat_results['csum_max'],
                                             strat_results['stake_currency'])),

            *drawdown_metrics,
            ('Market change', f"{strat_results['market_change']:.2%}"),
        ]

        return tabulate(metrics, headers=["Metric", "Value"], tablefmt="orgtbl")
    else:
        start_balance = round_coin_value(strat_results['starting_balance'],
                                         strat_results['stake_currency'])
        stake_amount = round_coin_value(
            strat_results['stake_amount'], strat_results['stake_currency']
        ) if strat_results['stake_amount'] != UNLIMITED_STAKE_AMOUNT else 'unlimited'

        message = ("No trades made. "
                   f"Your starting balance was {start_balance}, "
                   f"and your stake was {stake_amount}."
                   )
        return message


def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency: str,
                         backtest_breakdown=[]):
    """
    Print results for one strategy
    """
    # Print results
    print(f"Result for strategy {strategy}")
    table = text_table_bt_results(results['results_per_pair'], stake_currency=stake_currency)
    if isinstance(table, str):
        print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '='))
    print(table)

    table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency)
    if isinstance(table, str) and len(table) > 0:
        print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '='))
        print(table)

    if (results.get('results_per_enter_tag') is not None
            or results.get('results_per_buy_tag') is not None):
        # results_per_buy_tag is deprecated and should be removed 2 versions after short golive.
        table = text_table_tags(
            "enter_tag",
            results.get('results_per_enter_tag', results.get('results_per_buy_tag')),
            stake_currency=stake_currency)

        if isinstance(table, str) and len(table) > 0:
            print(' ENTER TAG STATS '.center(len(table.splitlines()[0]), '='))
            print(table)

    exit_reasons = results.get('exit_reason_summary', results.get('sell_reason_summary'))
    table = text_table_exit_reason(exit_reason_stats=exit_reasons,
                                   stake_currency=stake_currency)
    if isinstance(table, str) and len(table) > 0:
        print(' EXIT REASON STATS '.center(len(table.splitlines()[0]), '='))
        print(table)

    for period in backtest_breakdown:
        if period in results.get('periodic_breakdown', {}):
            days_breakdown_stats = results['periodic_breakdown'][period]
        else:
            days_breakdown_stats = generate_periodic_breakdown_stats(
                trade_list=results['trades'], period=period)
        table = text_table_periodic_breakdown(days_breakdown_stats=days_breakdown_stats,
                                              stake_currency=stake_currency, period=period)
        if isinstance(table, str) and len(table) > 0:
            print(f' {period.upper()} BREAKDOWN '.center(len(table.splitlines()[0]), '='))
            print(table)

    table = text_table_add_metrics(results)
    if isinstance(table, str) and len(table) > 0:
        print(' SUMMARY METRICS '.center(len(table.splitlines()[0]), '='))
        print(table)

    if isinstance(table, str) and len(table) > 0:
        print('=' * len(table.splitlines()[0]))

    print()


def show_backtest_results(config: Config, backtest_stats: Dict):
    stake_currency = config['stake_currency']

    for strategy, results in backtest_stats['strategy'].items():
        show_backtest_result(
            strategy, results, stake_currency,
            config.get('backtest_breakdown', []))

    if len(backtest_stats['strategy']) > 0:
        # Print Strategy summary table

        table = text_table_strategy(backtest_stats['strategy_comparison'], stake_currency)
        print(f"Backtested {results['backtest_start']} -> {results['backtest_end']} |"
              f" Max open trades : {results['max_open_trades']}")
        print(' STRATEGY SUMMARY '.center(len(table.splitlines()[0]), '='))
        print(table)
        print('=' * len(table.splitlines()[0]))
        print('\nFor more details, please look at the detail tables above')


def show_sorted_pairlist(config: Config, backtest_stats: Dict):
    if config.get('backtest_show_pair_list', False):
        for strategy, results in backtest_stats['strategy'].items():
            print(f"Pairs for Strategy {strategy}: \n[")
            for result in results['results_per_pair']:
                if result["key"] != 'TOTAL':
                    print(f'"{result["key"]}", // {result["profit_mean"]:.2%}')
            print("]")

@@ -42,7 +42,7 @@ class _KeyValueStoreModel(ModelBase):
    int_value: Mapped[Optional[int]]


class KeyValueStore():
class KeyValueStore:
    """
    Generic bot-wide, persistent key-value store
    Can be used to store generic values, e.g. very first bot startup time.

@@ -11,7 +11,7 @@ from freqtrade.persistence.models import PairLock
logger = logging.getLogger(__name__)


class PairLocks():
class PairLocks:
    """
    Pairlocks middleware class
    Abstracts the database layer away so it becomes optional - which will be necessary to support

@@ -10,6 +10,7 @@ from typing import Any, ClassVar, Dict, List, Optional, Sequence, cast
from sqlalchemy import (Enum, Float, ForeignKey, Integer, ScalarResult, Select, String,
                        UniqueConstraint, desc, func, select)
from sqlalchemy.orm import Mapped, lazyload, mapped_column, relationship, validates
from typing_extensions import Self

from freqtrade.constants import (CUSTOM_TAG_MAX_LENGTH, DATETIME_PRINT_FORMAT, MATH_CLOSE_PREC,
                                 NON_OPEN_EXCHANGE_STATES, BuySell, LongShort)

@@ -37,6 +38,7 @@ class Order(ModelBase):
    Mirrors CCXT Order structure
    """
    __tablename__ = 'orders'
    __allow_unmapped__ = True
    session: ClassVar[SessionType]

    # Uniqueness should be ensured over pair, order_id

@@ -46,7 +48,8 @@ class Order(ModelBase):
    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    ft_trade_id: Mapped[int] = mapped_column(Integer, ForeignKey('trades.id'), index=True)

    trade: Mapped[List["Trade"]] = relationship("Trade", back_populates="orders")
    _trade_live: Mapped["Trade"] = relationship("Trade", back_populates="orders")
    _trade_bt: "LocalTrade" = None  # type: ignore

    # order_side can only be 'buy', 'sell' or 'stoploss'
    ft_order_side: Mapped[str] = mapped_column(String(25), nullable=False)

@@ -118,11 +121,20 @@ class Order(ModelBase):
    def safe_amount_after_fee(self) -> float:
        return self.safe_filled - self.safe_fee_base

    @property
    def trade(self) -> "LocalTrade":
        return self._trade_bt or self._trade_live

    @property
    def stake_amount(self) -> float:
        """ Amount in stake currency used for this order"""
        return self.safe_amount * self.safe_price / self.trade.leverage

    def __repr__(self):

        return (f"Order(id={self.id}, order_id={self.order_id}, trade_id={self.ft_trade_id}, "
        return (f"Order(id={self.id}, trade={self.ft_trade_id}, order_id={self.order_id}, "
                f"side={self.side}, filled={self.safe_filled}, price={self.safe_price}, "
                f"order_type={self.order_type}, status={self.status})")
                f"status={self.status}, date={self.order_date:{DATETIME_PRINT_FORMAT}})")

    def update_from_ccxt_object(self, order):
        """

@@ -211,6 +223,7 @@ class Order(ModelBase):
            'order_type': self.order_type,
            'price': self.price,
            'remaining': self.remaining,
            'ft_fee_base': self.ft_fee_base,
        })
        return resp


@@ -246,15 +259,15 @@ class Order(ModelBase):
        else:
            logger.warning(f"Did not find order for {order}.")

    @staticmethod
    @classmethod
    def parse_from_ccxt_object(
            order: Dict[str, Any], pair: str, side: str,
            amount: Optional[float] = None, price: Optional[float] = None) -> 'Order':
            cls, order: Dict[str, Any], pair: str, side: str,
            amount: Optional[float] = None, price: Optional[float] = None) -> Self:
        """
        Parse an order from a ccxt object and return a new order Object.
        Optional support for overriding amount and price is only used for test simplification.
        """
        o = Order(
        o = cls(
            order_id=str(order['id']),
            ft_order_side=side,
            ft_pair=pair,
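
The @staticmethod to @classmethod switch, together with the Self return type, means a hypothetical Order subclass gets back its own type. A hedged sketch; the ccxt dict below is a minimal stub, and the real method reads further fields from it:

    class CustomOrder(Order):
        pass

    ccxt_order = {'id': '123', 'symbol': 'ETH/USDT', 'side': 'buy'}  # stub
    o = CustomOrder.parse_from_ccxt_object(ccxt_order, 'ETH/USDT', 'buy')
    # o is a CustomOrder, because the body now calls cls(...) instead of Order(...)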

@@ -282,7 +295,7 @@ class Order(ModelBase):
        return Order.session.scalars(select(Order).filter(Order.order_id == order_id)).first()


class LocalTrade():
class LocalTrade:
    """
    Trade database model.
    Used in backtesting - must be aligned to Trade model!

@@ -1329,9 +1342,12 @@ class Trade(ModelBase, LocalTrade):
        Float(), nullable=True, default=None)  # type: ignore

    def __init__(self, **kwargs):
        from_json = kwargs.pop('__FROM_JSON', None)
        super().__init__(**kwargs)
        self.realized_profit = 0
        self.recalc_open_trade_value()
        if not from_json:
            # Skip recalculation when loading from json
            self.realized_profit = 0
            self.recalc_open_trade_value()

    @validates('enter_tag', 'exit_reason')
    def validate_string_len(self, key, value):

@@ -1673,8 +1689,8 @@ class Trade(ModelBase, LocalTrade):
        )).scalar_one()
        return trading_volume

    @staticmethod
    def from_json(json_str: str) -> 'Trade':
    @classmethod
    def from_json(cls, json_str: str) -> Self:
        """
        Create a Trade instance from a json string.


@@ -1684,7 +1700,8 @@ class Trade(ModelBase, LocalTrade):
        """
        import rapidjson
        data = rapidjson.loads(json_str)
        trade = Trade(
        trade = cls(
            __FROM_JSON=True,
            id=data["trade_id"],
            pair=data["pair"],
            base_currency=data["base_currency"],

@@ -1738,6 +1755,7 @@ class Trade(ModelBase, LocalTrade):

            order_obj = Order(
                amount=order["amount"],
                ft_amount=order["amount"],
                ft_order_side=order["ft_order_side"],
                ft_pair=order["pair"],
                ft_is_open=order["is_open"],

@@ -1752,6 +1770,7 @@ class Trade(ModelBase, LocalTrade):
                if order["order_filled_timestamp"] else None),
                order_type=order["order_type"],
                price=order["price"],
                ft_price=order["price"],
                remaining=order["remaining"],
            )
            trade.orders.append(order_obj)
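
A minimal sketch of the round-trip this enables (assumes `existing_trade` is a populated Trade): __FROM_JSON short-circuits __init__ so loading does not trigger the open-trade-value recalculation, and cls(...) keeps subclasses intact.

    import rapidjson

    payload = rapidjson.dumps(existing_trade.to_json())
    restored = Trade.from_json(payload)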

@@ -55,7 +55,7 @@ def init_plotscript(config, markets: List, startup_candles: int = 0):
        timeframe=config['timeframe'],
        timerange=timerange,
        startup_candles=startup_candles,
        data_format=config.get('dataformat_ohlcv', 'json'),
        data_format=config['dataformat_ohlcv'],
        candle_type=config.get('candle_type_def', CandleType.SPOT)
    )


@@ -84,7 +84,7 @@ def init_plotscript(config, markets: List, startup_candles: int = 0):
    except ValueError as e:
        raise OperationalException(e) from e
    if not trades.empty:
        trades = trim_dataframe(trades, timerange, 'open_date')
        trades = trim_dataframe(trades, timerange, df_date_col='open_date')

    return {"ohlcv": data,
            "trades": trades,

@@ -89,10 +89,10 @@ class PerformanceFilter(IPairList):
        # Sort the list using:
        #  - primarily performance (high to low)
        #  - then count (low to high, so as to favor same performance with fewer trades)
        #  - then pair name alphabetically
        #  - then by prior index, keeping original sorting order
        sorted_df = list_df.merge(performance, on='pair', how='left')\
            .fillna(0).sort_values(by=['count', 'prior_idx'], ascending=True)\
            .sort_values(by=['profit_ratio'], ascending=False)
            .fillna(0).sort_values(by=['profit_ratio', 'count', 'prior_idx'],
                                   ascending=[False, True, True])
        if self._min_profit is not None:
            removed = sorted_df[sorted_df['profit_ratio'] < self._min_profit]
            for _, row in removed.iterrows():
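
A minimal sketch of the fix (illustrative data): one sort_values call with per-key directions replaces the two chained sorts and keeps the tie-breakers stable.

    import pandas as pd

    df = pd.DataFrame({'pair': ['A', 'B', 'C'],
                       'profit_ratio': [0.2, 0.2, 0.1],
                       'count': [5, 3, 1],
                       'prior_idx': [0, 1, 2]})
    df.sort_values(by=['profit_ratio', 'count', 'prior_idx'],
                   ascending=[False, True, True])
    # 'B' (same profit as 'A', fewer trades) now ranks ahead of 'A'.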

@@ -3,19 +3,21 @@ Remote PairList provider

Provides pair list fetched from a remote source
"""
import json
import logging
from pathlib import Path
from typing import Any, Dict, List, Tuple

import rapidjson
import requests
from cachetools import TTLCache

from freqtrade import __version__
from freqtrade.configuration.load_config import CONFIG_PARSE_MODE
from freqtrade.constants import Config
from freqtrade.exceptions import OperationalException
from freqtrade.exchange.types import Tickers
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist


logger = logging.getLogger(__name__)

@@ -40,6 +42,8 @@ class RemotePairList(IPairList):
                '`pairlist_url` not specified. Please check your configuration '
                'for "pairlist.config.pairlist_url"')

        self._mode = self._pairlistconfig.get('mode', 'whitelist')
        self._processing_mode = self._pairlistconfig.get('processing_mode', 'filter')
        self._number_pairs = self._pairlistconfig['number_assets']
        self._refresh_period: int = self._pairlistconfig.get('refresh_period', 1800)
        self._keep_pairlist_on_failure = self._pairlistconfig.get('keep_pairlist_on_failure', True)

@@ -50,6 +54,21 @@ class RemotePairList(IPairList):
        self._init_done = False
        self._last_pairlist: List[Any] = list()

        if self._mode not in ['whitelist', 'blacklist']:
            raise OperationalException(
                '`mode` not configured correctly. Supported Modes '
                'are "whitelist","blacklist"')

        if self._processing_mode not in ['filter', 'append']:
            raise OperationalException(
                '`processing_mode` not configured correctly. Supported Modes '
                'are "filter","append"')

        if self._pairlist_pos == 0 and self._mode == 'blacklist':
            raise OperationalException(
                'A `blacklist` mode RemotePairList can not be on the first '
                'position of your pairlist.')

    @property
    def needstickers(self) -> bool:
        """

@@ -67,23 +86,37 @@ class RemotePairList(IPairList):

    @staticmethod
    def description() -> str:
        return "Retrieve pairs from a remote API."
        return "Retrieve pairs from a remote API or local file."

    @staticmethod
    def available_parameters() -> Dict[str, PairlistParameter]:
        return {
            "number_assets": {
                "type": "number",
                "default": 0,
                "description": "Number of assets",
                "help": "Number of assets to use from the pairlist.",
            },
            "pairlist_url": {
                "type": "string",
                "default": "",
                "description": "URL to fetch pairlist from",
                "help": "URL to fetch pairlist from",
            },
            "number_assets": {
                "type": "number",
                "default": 30,
                "description": "Number of assets",
                "help": "Number of assets to use from the pairlist.",
            },
            "mode": {
                "type": "option",
                "default": "whitelist",
                "options": ["whitelist", "blacklist"],
                "description": "Pairlist mode",
                "help": "Should this pairlist operate as a whitelist or blacklist?",
            },
            "processing_mode": {
                "type": "option",
                "default": "filter",
                "options": ["filter", "append"],
                "description": "Processing mode",
                "help": "Append pairs to incoming pairlist or filter them?",
            },
            **IPairList.refresh_period_parameter(),
            "keep_pairlist_on_failure": {
                "type": "boolean",

@@ -204,7 +237,7 @@ class RemotePairList(IPairList):
        if file_path.exists():
            with file_path.open() as json_file:
                # Load the JSON data into a dictionary
                jsonparse = json.load(json_file)
                jsonparse = rapidjson.load(json_file, parse_mode=CONFIG_PARSE_MODE)

                try:
                    pairlist = self.process_json(jsonparse)

@@ -223,6 +256,7 @@ class RemotePairList(IPairList):

        self.log_once(f"Fetched pairs: {pairlist}", logger.debug)

        pairlist = expand_pairlist(pairlist, list(self._exchange.get_markets().keys()))
        pairlist = self._whitelist_for_active_markets(pairlist)
        pairlist = pairlist[:self._number_pairs]

@@ -250,6 +284,23 @@ class RemotePairList(IPairList):
        :return: new whitelist
        """
        rpl_pairlist = self.gen_pairlist(tickers)
        merged_list = pairlist + rpl_pairlist
        merged_list = sorted(set(merged_list), key=merged_list.index)
        merged_list = []
        filtered = []

        if self._mode == "whitelist":
            if self._processing_mode == "filter":
                merged_list = [pair for pair in pairlist if pair in rpl_pairlist]
            elif self._processing_mode == "append":
                merged_list = pairlist + rpl_pairlist
                merged_list = sorted(set(merged_list), key=merged_list.index)
        else:
            for pair in pairlist:
                if pair not in rpl_pairlist:
                    merged_list.append(pair)
                else:
                    filtered.append(pair)
            if filtered:
                self.log_once(f"Blacklist - Filtered out pairs: {filtered}", logger.info)

        merged_list = merged_list[:self._number_pairs]
        return merged_list
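
The 'append' branch uses an order-preserving de-duplication idiom: set() drops the duplicates, and sorting on the list's first-occurrence index restores the original ordering.

    pairs = ['BTC/USDT', 'ETH/USDT', 'BTC/USDT', 'XRP/USDT']
    sorted(set(pairs), key=pairs.index)
    # -> ['BTC/USDT', 'ETH/USDT', 'XRP/USDT']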

@@ -13,9 +13,8 @@ from freqtrade.constants import Config, ListPairsWithTimeframes
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_minutes, timeframe_to_prev_date
from freqtrade.exchange.types import Tickers
from freqtrade.misc import format_ms_time
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
from freqtrade.util import dt_now
from freqtrade.util import dt_now, format_ms_time


logger = logging.getLogger(__name__)

@@ -15,7 +15,7 @@ from freqtrade.resolvers import ProtectionResolver
logger = logging.getLogger(__name__)


class ProtectionManager():
class ProtectionManager:

    def __init__(self, config: Config, protections: List) -> None:
        self._config = config

@@ -2,6 +2,7 @@ import asyncio
import logging
from copy import deepcopy
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List

from fastapi import APIRouter, BackgroundTasks, Depends

@@ -9,16 +10,18 @@ from fastapi.exceptions import HTTPException

from freqtrade.configuration.config_validation import validate_config_consistency
from freqtrade.constants import Config
from freqtrade.data.btanalysis import get_backtest_resultlist, load_and_merge_backtest_result
from freqtrade.data.btanalysis import (delete_backtest_result, get_backtest_resultlist,
                                       load_and_merge_backtest_result)
from freqtrade.enums import BacktestState
from freqtrade.exceptions import DependencyException, OperationalException
from freqtrade.exchange.common import remove_exchange_credentials
from freqtrade.misc import deep_merge_dicts
from freqtrade.misc import deep_merge_dicts, is_file_in_dir
from freqtrade.rpc.api_server.api_schemas import (BacktestHistoryEntry, BacktestRequest,
                                                  BacktestResponse)
from freqtrade.rpc.api_server.deps import get_config
from freqtrade.rpc.api_server.webserver_bgwork import ApiBG
from freqtrade.rpc.rpc import RPCException
from freqtrade.types import get_BacktestResultType_default


logger = logging.getLogger(__name__)

@@ -67,7 +70,7 @@ def __run_backtest_bg(btconfig: Config):

    ApiBG.bt['bt'].enable_protections = btconfig.get('enable_protections', False)
    ApiBG.bt['bt'].strategylist = [strat]
    ApiBG.bt['bt'].results = {}
    ApiBG.bt['bt'].results = get_BacktestResultType_default()
    ApiBG.bt['bt'].load_prior_backtest()

    ApiBG.bt['bt'].abort = False

@@ -245,13 +248,16 @@ def api_backtest_history(config=Depends(get_config)):
               tags=['webserver', 'backtest'])
def api_backtest_history_result(filename: str, strategy: str, config=Depends(get_config)):
    # Get backtest result history, read from metadata files
    fn = config['user_data_dir'] / 'backtest_results' / filename
    bt_results_base: Path = config['user_data_dir'] / 'backtest_results'
    fn = (bt_results_base / filename).with_suffix('.json')

    results: Dict[str, Any] = {
        'metadata': {},
        'strategy': {},
        'strategy_comparison': [],
    }

    if not is_file_in_dir(fn, bt_results_base):
        raise HTTPException(status_code=404, detail="File not found.")
    load_and_merge_backtest_result(strategy, fn, results)
    return {
        "status": "ended",

@@ -261,3 +267,17 @@ def api_backtest_history_result(filename: str, strategy: str, config=Depends(get
        "status_msg": "Historic result",
        "backtest_result": results,
    }


@router.delete('/backtest/history/{file}', response_model=List[BacktestHistoryEntry],
               tags=['webserver', 'backtest'])
def api_delete_backtest_history_entry(file: str, config=Depends(get_config)):
    # Delete a backtest result and its metadata file
    bt_results_base: Path = config['user_data_dir'] / 'backtest_results'
    file_abs = (bt_results_base / file).with_suffix('.json')
    # Ensure file is in backtest_results directory
    if not is_file_in_dir(file_abs, bt_results_base):
        raise HTTPException(status_code=404, detail="File not found.")

    delete_backtest_result(file_abs)
    return get_backtest_resultlist(config['user_data_dir'] / 'backtest_results')
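
A hedged usage sketch of the new delete endpoint; host, port and credentials are illustrative, and the route assumes the usual /api/v1 prefix:

    import requests

    resp = requests.delete(
        'http://127.0.0.1:8080/api/v1/backtest/history/backtest-result-2023-07-19_12-00-00',
        auth=('user', 'pass'))
    resp.raise_for_status()  # body: the remaining list of backtest history entries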
|
||||
|
|
|
@ -136,6 +136,9 @@ class Profit(BaseModel):
|
|||
winning_trades: int
|
||||
losing_trades: int
|
||||
profit_factor: float
|
||||
winrate: float
|
||||
expectancy: float
|
||||
expectancy_ratio: float
|
||||
max_drawdown: float
|
||||
max_drawdown_abs: float
|
||||
trading_volume: Optional[float]
|
||||
|
@ -238,6 +241,7 @@ class OrderSchema(BaseModel):
|
|||
is_open: bool
|
||||
order_timestamp: Optional[int]
|
||||
order_filled_timestamp: Optional[int]
|
||||
ft_fee_base: Optional[float]
|
||||
|
||||
|
||||
class TradeSchema(BaseModel):
|
||||
|
@ -516,6 +520,7 @@ class BacktestResponse(BaseModel):
|
|||
backtest_result: Optional[Dict[str, Any]]
|
||||
|
||||
|
||||
# TODO: This is a copy of BacktestHistoryEntryType
|
||||
class BacktestHistoryEntry(BaseModel):
|
||||
filename: str
|
||||
strategy: str
|
||||
|
|
|
@ -49,7 +49,8 @@ logger = logging.getLogger(__name__)
|
|||
# 2.28: Switch reload endpoint to Post
|
||||
# 2.29: Add /exchanges endpoint
|
||||
# 2.30: new /pairlists endpoint
|
||||
API_VERSION = 2.30
|
||||
# 2.31: new /backtest/history/ delete endpoint
|
||||
API_VERSION = 2.31
|
||||
|
||||
# Public API, requires no auth.
|
||||
router_public = APIRouter()
|
||||
|
@ -268,7 +269,10 @@ def pair_history(pair: str, timeframe: str, timerange: str, strategy: str,
|
|||
'timerange': timerange,
|
||||
'freqaimodel': freqaimodel if freqaimodel else config.get('freqaimodel'),
|
||||
})
|
||||
return RPC._rpc_analysed_history_full(config, pair, timeframe, exchange)
|
||||
try:
|
||||
return RPC._rpc_analysed_history_full(config, pair, timeframe, exchange)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=502, detail=str(e))
|
||||
|
||||
|
||||
@router.get('/plot_config', response_model=PlotConfig, tags=['candle data'])
|
||||
|
@ -283,7 +287,10 @@ def plot_config(strategy: Optional[str] = None, config=Depends(get_config),
|
|||
config1.update({
|
||||
'strategy': strategy
|
||||
})
|
||||
try:
|
||||
return PlotConfig.parse_obj(RPC._rpc_plot_config_with_strategy(config1))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=502, detail=str(e))
|
||||
|
||||
|
||||
@router.get('/strategies', response_model=StrategyListResponse, tags=['strategy'])
|
||||
|
@ -308,7 +315,8 @@ def get_strategy(strategy: str, config=Depends(get_config)):
|
|||
extra_dir=config_.get('strategy_path'))
|
||||
except OperationalException:
|
||||
raise HTTPException(status_code=404, detail='Strategy not found')
|
||||
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=502, detail=str(e))
|
||||
return {
|
||||
'strategy': strategy_obj.get_strategy_name(),
|
||||
'code': strategy_obj.__source__,
|
||||
|
|
|
@ -7,12 +7,14 @@ from fastapi.websockets import WebSocket
|
|||
from pydantic import ValidationError
|
||||
|
||||
from freqtrade.enums import RPCMessageType, RPCRequestType
|
||||
from freqtrade.exceptions import FreqtradeException
|
||||
from freqtrade.rpc.api_server.api_auth import validate_ws_token
|
||||
from freqtrade.rpc.api_server.deps import get_message_stream, get_rpc
|
||||
from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel
|
||||
from freqtrade.rpc.api_server.ws.message_stream import MessageStream
|
||||
from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSMessageSchema,
|
||||
WSRequestSchema, WSWhitelistMessage)
|
||||
from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSErrorMessage,
|
||||
WSMessageSchema, WSRequestSchema,
|
||||
WSWhitelistMessage)
|
||||
from freqtrade.rpc.rpc import RPC
|
||||
|
||||
|
||||
|
@ -27,7 +29,13 @@ async def channel_reader(channel: WebSocketChannel, rpc: RPC):
|
|||
Iterate over the messages from the channel and process the request
|
||||
"""
|
||||
async for message in channel:
|
||||
await _process_consumer_request(message, channel, rpc)
|
||||
try:
|
||||
await _process_consumer_request(message, channel, rpc)
|
||||
except FreqtradeException:
|
||||
logger.exception(f"Error processing request from {channel}")
|
||||
response = WSErrorMessage(data='Error processing request')
|
||||
|
||||
await channel.send(response.dict(exclude_none=True))
|
||||
|
||||
|
||||
async def channel_broadcaster(channel: WebSocketChannel, message_stream: MessageStream):
|
||||
|
@ -62,13 +70,13 @@ async def _process_consumer_request(
|
|||
logger.error(f"Invalid request from {channel}: {e}")
|
||||
return
|
||||
|
||||
type, data = websocket_request.type, websocket_request.data
|
||||
type_, data = websocket_request.type, websocket_request.data
|
||||
response: WSMessageSchema
|
||||
|
||||
logger.debug(f"Request of type {type} from {channel}")
|
||||
logger.debug(f"Request of type {type_} from {channel}")
|
||||
|
||||
# If we have a request of type SUBSCRIBE, set the topics in this channel
|
||||
if type == RPCRequestType.SUBSCRIBE:
|
||||
if type_ == RPCRequestType.SUBSCRIBE:
|
||||
# If the request is empty, do nothing
|
||||
if not data:
|
||||
return
|
||||
|
@ -80,7 +88,7 @@ async def _process_consumer_request(
|
|||
# We don't send a response for subscriptions
|
||||
return
|
||||
|
||||
elif type == RPCRequestType.WHITELIST:
|
||||
elif type_ == RPCRequestType.WHITELIST:
|
||||
# Get whitelist
|
||||
whitelist = rpc._ws_request_whitelist()
|
||||
|
||||
|
@ -88,7 +96,7 @@ async def _process_consumer_request(
|
|||
response = WSWhitelistMessage(data=whitelist)
|
||||
await channel.send(response.dict(exclude_none=True))
|
||||
|
||||
elif type == RPCRequestType.ANALYZED_DF:
|
||||
elif type_ == RPCRequestType.ANALYZED_DF:
|
||||
# Limit the amount of candles per dataframe to 'limit' or 1500
|
||||
limit = int(min(data.get('limit', 1500), 1500)) if data else None
|
||||
pair = data.get('pair', None) if data else None
|
||||
|
|
|
@ -30,7 +30,7 @@ async def ui_version():
|
|||
}
|
||||
|
||||
|
||||
def is_relative_to(path, base) -> bool:
|
||||
def is_relative_to(path: Path, base: Path) -> bool:
|
||||
# Helper function simulating behaviour of is_relative_to, which was only added in python 3.9
|
||||
try:
|
||||
path.relative_to(base)
|
||||
|
|
|
freqtrade/rpc/api_server/webserver.py
@@ -8,6 +8,7 @@ from fastapi import Depends, FastAPI
 from fastapi.middleware.cors import CORSMiddleware
 from starlette.responses import JSONResponse

+from freqtrade.configuration import running_in_docker
 from freqtrade.constants import Config
 from freqtrade.exceptions import OperationalException
 from freqtrade.rpc.api_server.uvicorn_threaded import UvicornServer
@@ -182,7 +183,7 @@ class ApiServer(RPCHandler):
         rest_port = self._config['api_server']['listen_port']

         logger.info(f'Starting HTTP Server at {rest_ip}:{rest_port}')
-        if not IPv4Address(rest_ip).is_loopback:
+        if not IPv4Address(rest_ip).is_loopback and not running_in_docker():
             logger.warning("SECURITY WARNING - Local Rest Server listening to external connections")
             logger.warning("SECURITY WARNING - This is insecure please set to your loopback,"
                            "e.g 127.0.0.1 in config.json")
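The extra condition silences the warning inside Docker, where binding beyond loopback is the norm and the container boundary provides the isolation. A quick demonstration of the ipaddress check used here (should_warn is a hypothetical wrapper):

from ipaddress import IPv4Address

def should_warn(listen_ip: str, in_docker: bool) -> bool:
    # Warn only when listening beyond loopback outside a container.
    return not IPv4Address(listen_ip).is_loopback and not in_docker

assert not should_warn("127.0.0.1", in_docker=False)   # loopback: safe
assert should_warn("0.0.0.0", in_docker=False)         # exposed on the host
assert not should_warn("0.0.0.0", in_docker=True)      # expected in Docker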
freqtrade/rpc/api_server/webserver_bgwork.py
@@ -14,7 +14,7 @@ class JobsContainer(TypedDict):
     error: Optional[str]


-class ApiBG():
+class ApiBG:
     # Backtesting type: Backtesting
     bt: Dict[str, Any] = {
         'bt': None,
freqtrade/rpc/api_server/ws_schemas.py
@@ -66,4 +66,9 @@ class WSAnalyzedDFMessage(WSMessageSchema):
     type: RPCMessageType = RPCMessageType.ANALYZED_DF
     data: AnalyzedDFData

+
+class WSErrorMessage(WSMessageSchema):
+    type: RPCMessageType = RPCMessageType.EXCEPTION
+    data: str
+
 # --------------------------------------------------------------------------
freqtrade/rpc/fiat_convert.py
@@ -25,6 +25,7 @@ coingecko_mapping = {
     'bnb': 'binancecoin',
+    'sol': 'solana',
     'usdt': 'tether',
     'busd': 'binance-usd',
 }

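The mapping exists because a bare ticker like sol is ambiguous on CoinGecko, which keys its price API by coin id rather than symbol. A sketch of the lookup this table feeds, assuming the pycoingecko client (CoinGeckoAPI.get_price is its documented call; crypto_price_usd is a made-up wrapper):

from pycoingecko import CoinGeckoAPI

coingecko_mapping = {
    'bnb': 'binancecoin',
    'sol': 'solana',
    'usdt': 'tether',
    'busd': 'binance-usd',
}

def crypto_price_usd(symbol: str) -> float:
    # Resolve the symbol to a pinned id before querying prices.
    coin_id = coingecko_mapping[symbol.lower()]
    data = CoinGeckoAPI().get_price(ids=coin_id, vs_currencies='usd')
    return data[coin_id]['usd']

# print(crypto_price_usd('sol'))  # requires network access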
freqtrade/rpc/rpc.py
@@ -18,7 +18,7 @@ from freqtrade import __version__
 from freqtrade.configuration.timerange import TimeRange
 from freqtrade.constants import CANCEL_REASON, DATETIME_PRINT_FORMAT, Config
 from freqtrade.data.history import load_data
-from freqtrade.data.metrics import calculate_max_drawdown
+from freqtrade.data.metrics import calculate_expectancy, calculate_max_drawdown
 from freqtrade.enums import (CandleType, ExitCheckTuple, ExitType, MarketDirection, SignalDirection,
                              State, TradingMode)
 from freqtrade.exceptions import ExchangeError, PricingError
@@ -501,6 +501,8 @@ class RPC:
             profit_all_coin.append(profit_abs)
             profit_all_ratio.append(profit_ratio)

+        closed_trade_count = len([t for t in trades if not t.is_open])
+
         best_pair = Trade.get_best_pair(start_date)
         trading_volume = Trade.get_trading_volume(start_date)

@@ -528,9 +530,14 @@ class RPC:

         profit_factor = winning_profit / abs(losing_profit) if losing_profit else float('inf')

+        winrate = (winning_trades / closed_trade_count) if closed_trade_count > 0 else 0
+
         trades_df = DataFrame([{'close_date': trade.close_date.strftime(DATETIME_PRINT_FORMAT),
                                 'profit_abs': trade.close_profit_abs}
                                for trade in trades if not trade.is_open and trade.close_date])
+
+        expectancy, expectancy_ratio = calculate_expectancy(trades_df)
+
         max_drawdown_abs = 0.0
         max_drawdown = 0.0
         if len(trades_df) > 0:
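For readers unfamiliar with the metric: expectancy is the average profit per closed trade, and the ratio form relates winrate to the reward/risk ratio. A sketch of the usual definitions over the profit_abs column — an assumption about what calculate_expectancy computes, since freqtrade's implementation may differ in edge-case handling (expectancy_metrics is a made-up name):

from statistics import mean

def expectancy_metrics(profits_abs):
    """Standard trading-expectancy definitions over closed-trade profits."""
    if not profits_abs:
        return 0.0, 0.0
    wins = [p for p in profits_abs if p > 0]
    losses = [p for p in profits_abs if p <= 0]
    winrate = len(wins) / len(profits_abs)
    avg_win = mean(wins) if wins else 0.0
    avg_loss = abs(mean(losses)) if losses else 0.0
    # Expectancy: mean profit per trade.
    expectancy = mean(profits_abs)
    # Expectancy ratio: (reward/risk * winrate) - loss rate.
    risk_reward = avg_win / avg_loss if avg_loss else float('inf')
    expectancy_ratio = risk_reward * winrate - (1 - winrate)
    return expectancy, expectancy_ratio

print(expectancy_metrics([10.0, -5.0, 20.0, -5.0]))  # (5.0, 1.0): positive edge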
@@ -569,7 +576,7 @@ class RPC:
             'profit_all_percent': round(profit_all_ratio_fromstart * 100, 2),
             'profit_all_fiat': profit_all_fiat,
             'trade_count': len(trades),
-            'closed_trade_count': len([t for t in trades if not t.is_open]),
+            'closed_trade_count': closed_trade_count,
             'first_trade_date': first_date.strftime(DATETIME_PRINT_FORMAT) if first_date else '',
             'first_trade_humanized': dt_humanize(first_date) if first_date else '',
             'first_trade_timestamp': int(first_date.timestamp() * 1000) if first_date else 0,
@@ -583,6 +590,9 @@ class RPC:
             'winning_trades': winning_trades,
             'losing_trades': losing_trades,
             'profit_factor': profit_factor,
+            'winrate': winrate,
+            'expectancy': expectancy,
+            'expectancy_ratio': expectancy_ratio,
             'max_drawdown': max_drawdown,
             'max_drawdown_abs': max_drawdown_abs,
             'trading_volume': trading_volume,
@@ -1183,8 +1193,8 @@ class RPC:
         """ Analyzed dataframe in Dict form """

         _data, last_analyzed = self.__rpc_analysed_dataframe_raw(pair, timeframe, limit)
-        return self._convert_dataframe_to_dict(self._freqtrade.config['strategy'],
-                                               pair, timeframe, _data, last_analyzed)
+        return RPC._convert_dataframe_to_dict(self._freqtrade.config['strategy'],
+                                              pair, timeframe, _data, last_analyzed)

     def __rpc_analysed_dataframe_raw(
             self,
@@ -1254,27 +1264,34 @@ class RPC:
                                exchange) -> Dict[str, Any]:
         timerange_parsed = TimeRange.parse_timerange(config.get('timerange'))

+        from freqtrade.data.converter import trim_dataframe
+        from freqtrade.data.dataprovider import DataProvider
+        from freqtrade.resolvers.strategy_resolver import StrategyResolver
+
+        strategy = StrategyResolver.load_strategy(config)
+        startup_candles = strategy.startup_candle_count
+
         _data = load_data(
             datadir=config["datadir"],
             pairs=[pair],
             timeframe=timeframe,
             timerange=timerange_parsed,
-            data_format=config.get('dataformat_ohlcv', 'json'),
-            candle_type=config.get('candle_type_def', CandleType.SPOT)
+            data_format=config['dataformat_ohlcv'],
+            candle_type=config.get('candle_type_def', CandleType.SPOT),
+            startup_candles=startup_candles,
         )
         if pair not in _data:
             raise RPCException(
                 f"No data for {pair}, {timeframe} in {config.get('timerange')} found.")
-        from freqtrade.data.dataprovider import DataProvider
-        from freqtrade.resolvers.strategy_resolver import StrategyResolver
-        strategy = StrategyResolver.load_strategy(config)

         strategy.dp = DataProvider(config, exchange=exchange, pairlists=None)
         strategy.ft_bot_start()

         df_analyzed = strategy.analyze_ticker(_data[pair], {'pair': pair})
+        df_analyzed = trim_dataframe(df_analyzed, timerange_parsed, startup_candles=startup_candles)

         return RPC._convert_dataframe_to_dict(strategy.get_strategy_name(), pair, timeframe,
-                                              df_analyzed, dt_now())
+                                              df_analyzed.copy(), dt_now())

     def _rpc_plot_config(self) -> Dict[str, Any]:
         if (self._freqtrade.strategy.plot_config and
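The reshuffle above loads extra warm-up candles ahead of the requested timerange, runs the indicators, then trims the warm-up rows off — so indicators near the start of the range are fully formed rather than NaN. A self-contained sketch of that load-extra-then-trim idea (analyze_with_warmup is an illustrative name, not freqtrade's API):

import pandas as pd

def analyze_with_warmup(candles: pd.DataFrame, startup_candles: int) -> pd.DataFrame:
    """Compute a rolling indicator, then drop the warm-up rows."""
    df = candles.copy()
    # A 20-period SMA needs 19 prior candles before its first valid value.
    df['sma20'] = df['close'].rolling(20).mean()
    # Trim the warm-up region so every returned row has a valid indicator.
    return df.iloc[startup_candles:].reset_index(drop=True)

candles = pd.DataFrame({'close': range(1, 101)})  # 100 synthetic candles
result = analyze_with_warmup(candles, startup_candles=20)
assert not result['sma20'].isna().any()  # no half-formed indicator values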
freqtrade/rpc/telegram.py
@@ -849,6 +849,10 @@ class Telegram(RPCHandler):
         avg_duration = stats['avg_duration']
         best_pair = stats['best_pair']
         best_pair_profit_ratio = stats['best_pair_profit_ratio']
+        winrate = stats['winrate']
+        expectancy = stats['expectancy']
+        expectancy_ratio = stats['expectancy_ratio']
+
         if stats['trade_count'] == 0:
             markdown_msg = f"No trades yet.\n*Bot started:* `{stats['bot_start_date']}`"
         else:
@@ -873,7 +877,9 @@ class Telegram(RPCHandler):
                 f"*{'First Trade opened' if not timescale else 'Showing Profit since'}:* "
                 f"`{first_trade_date}`\n"
                 f"*Latest Trade opened:* `{latest_trade_date}`\n"
-                f"*Win / Loss:* `{stats['winning_trades']} / {stats['losing_trades']}`"
+                f"*Win / Loss:* `{stats['winning_trades']} / {stats['losing_trades']}`\n"
+                f"*Winrate:* `{winrate:.2%}`\n"
+                f"*Expectancy (Ratio):* `{expectancy:.2f} ({expectancy_ratio:.2f})`"
             )
             if stats['closed_trade_count'] > 0:
                 markdown_msg += (
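Note the :.2% format spec used for winrate: it multiplies by 100 and appends %, so the stats dict can keep the raw ratio. A quick check with sample values:

winrate = 0.5731
print(f"*Winrate:* `{winrate:.2%}`")   # *Winrate:* `57.31%`
expectancy, expectancy_ratio = 1.2345, 0.6789
print(f"*Expectancy (Ratio):* `{expectancy:.2f} ({expectancy_ratio:.2f})`")
# *Expectancy (Ratio):* `1.23 (0.68)`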
freqtrade/rpc/webhook.py
@@ -34,6 +34,7 @@ class Webhook(RPCHandler):
         self._format = self._config['webhook'].get('format', 'form')
         self._retries = self._config['webhook'].get('retries', 0)
         self._retry_delay = self._config['webhook'].get('retry_delay', 0.1)
+        self._timeout = self._config['webhook'].get('timeout', 10)

     def cleanup(self) -> None:
         """
@@ -107,12 +108,13 @@ class Webhook(RPCHandler):

         try:
             if self._format == 'form':
-                response = post(self._url, data=payload)
+                response = post(self._url, data=payload, timeout=self._timeout)
             elif self._format == 'json':
-                response = post(self._url, json=payload)
+                response = post(self._url, json=payload, timeout=self._timeout)
             elif self._format == 'raw':
-                response = post(self._url, data=payload['data'],
-                                headers={'Content-Type': 'text/plain'})
+                response = post(self._url, data=payload['data'],
+                                headers={'Content-Type': 'text/plain'},
+                                timeout=self._timeout)
             else:
                 raise NotImplementedError(f'Unknown format: {self._format}')

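Passing timeout matters because requests.post blocks indefinitely by default: a single stalled webhook endpoint could otherwise hang the sending thread. A minimal sketch of the same guard (send_webhook and WEBHOOK_URL are assumptions, not freqtrade names):

import requests

WEBHOOK_URL = "https://example.com/hook"  # placeholder endpoint

def send_webhook(payload: dict, timeout: float = 10) -> bool:
    # requests has no default timeout; without one, post() can block forever
    # on an unresponsive server.
    try:
        response = requests.post(WEBHOOK_URL, json=payload, timeout=timeout)
        response.raise_for_status()
        return True
    except requests.RequestException as exc:
        print(f"Webhook failed: {exc}")
        return False

send_webhook({"status": "filled", "pair": "BTC/USDT"})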