Mirror of https://github.com/freqtrade/freqtrade.git (synced 2024-11-10 02:12:01 +00:00)

Merge pull request #7954 from freqtrade/new_release

New release 2022.12

Commit: 9a46613975
.github/ISSUE_TEMPLATE/bug_report.md (2 changes, vendored)

@@ -20,7 +20,7 @@ Please do not use bug reports to request new features.
 * Operating system: ____
 * Python Version: _____ (`python -V`)
 * CCXT version: _____ (`pip freeze | grep ccxt`)
-* Freqtrade Version: ____ (`freqtrade -V` or `docker-compose run --rm freqtrade -V` for Freqtrade running in docker)
+* Freqtrade Version: ____ (`freqtrade -V` or `docker compose run --rm freqtrade -V` for Freqtrade running in docker)

 Note: All issues other than enhancement requests will be closed without further comment if the above template is deleted or not filled out.
.github/ISSUE_TEMPLATE/feature_request.md (2 changes, vendored)

@@ -18,7 +18,7 @@ Have you search for this feature before requesting it? It's highly likely that a
 * Operating system: ____
 * Python Version: _____ (`python -V`)
 * CCXT version: _____ (`pip freeze | grep ccxt`)
-* Freqtrade Version: ____ (`freqtrade -V` or `docker-compose run --rm freqtrade -V` for Freqtrade running in docker)
+* Freqtrade Version: ____ (`freqtrade -V` or `docker compose run --rm freqtrade -V` for Freqtrade running in docker)

 ## Describe the enhancement
.github/ISSUE_TEMPLATE/question.md (2 changes, vendored)

@@ -18,7 +18,7 @@ Please do not use the question template to report bugs or to request new feature
 * Operating system: ____
 * Python Version: _____ (`python -V`)
 * CCXT version: _____ (`pip freeze | grep ccxt`)
-* Freqtrade Version: ____ (`freqtrade -V` or `docker-compose run --rm freqtrade -V` for Freqtrade running in docker)
+* Freqtrade Version: ____ (`freqtrade -V` or `docker compose run --rm freqtrade -V` for Freqtrade running in docker)

 ## Your question
.github/workflows/ci.yml (82 changes, vendored)

@@ -66,12 +66,6 @@ jobs:
     - name: Tests
       run: |
         pytest --random-order --cov=freqtrade --cov-config=.coveragerc
-      if: matrix.python-version != '3.9' || matrix.os != 'ubuntu-22.04'
-
-    - name: Tests incl. ccxt compatibility tests
-      run: |
-        pytest --random-order --cov=freqtrade --cov-config=.coveragerc --longrun
-      if: matrix.python-version == '3.9' && matrix.os == 'ubuntu-22.04'

     - name: Coveralls
       if: (runner.os == 'Linux' && matrix.python-version == '3.10' && matrix.os == 'ubuntu-22.04')

@@ -94,7 +88,7 @@ jobs:
       run: |
         cp config_examples/config_bittrex.example.json config.json
         freqtrade create-userdir --userdir user_data
-        freqtrade hyperopt --datadir tests/testdata -e 5 --strategy SampleStrategy --hyperopt-loss SharpeHyperOptLossDaily --print-all
+        freqtrade hyperopt --datadir tests/testdata -e 6 --strategy SampleStrategy --hyperopt-loss SharpeHyperOptLossDaily --print-all

     - name: Flake8
       run: |

@@ -154,6 +148,19 @@ jobs:
       if: runner.os == 'macOS'
       run: |
         brew update
+        # homebrew fails to update python due to unlinking failures
+        # https://github.com/actions/runner-images/issues/6817
+        rm /usr/local/bin/2to3 || true
+        rm /usr/local/bin/2to3-3.11 || true
+        rm /usr/local/bin/idle3 || true
+        rm /usr/local/bin/idle3.11 || true
+        rm /usr/local/bin/pydoc3 || true
+        rm /usr/local/bin/pydoc3.11 || true
+        rm /usr/local/bin/python3 || true
+        rm /usr/local/bin/python3.11 || true
+        rm /usr/local/bin/python3-config || true
+        rm /usr/local/bin/python3.11-config || true
+
         brew install hdf5 c-blosc
         python -m pip install --upgrade pip wheel
         export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH

@@ -310,9 +317,64 @@ jobs:
         details: Freqtrade doc test failed!
         webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}

+
+  build_linux_online:
+    # Run pytest with "live" checks
+    runs-on: ubuntu-22.04
+    # permissions:
+    steps:
+    - uses: actions/checkout@v3
+
+    - name: Set up Python
+      uses: actions/setup-python@v4
+      with:
+        python-version: "3.9"
+
+    - name: Cache_dependencies
+      uses: actions/cache@v3
+      id: cache
+      with:
+        path: ~/dependencies/
+        key: ${{ runner.os }}-dependencies
+
+    - name: pip cache (linux)
+      uses: actions/cache@v3
+      if: runner.os == 'Linux'
+      with:
+        path: ~/.cache/pip
+        key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip
+
+    - name: TA binary *nix
+      if: steps.cache.outputs.cache-hit != 'true'
+      run: |
+        cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd ..
+
+    - name: Installation - *nix
+      if: runner.os == 'Linux'
+      run: |
+        python -m pip install --upgrade pip wheel
+        export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
+        export TA_LIBRARY_PATH=${HOME}/dependencies/lib
+        export TA_INCLUDE_PATH=${HOME}/dependencies/include
+        pip install -r requirements-dev.txt
+        pip install -e .
+
+    - name: Tests incl. ccxt compatibility tests
+      run: |
+        pytest --random-order --cov=freqtrade --cov-config=.coveragerc --longrun
+
   # Notify only once - when CI completes (and after deploy) in case it's successfull
   notify-complete:
-    needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check, pre-commit ]
+    needs: [
+      build_linux,
+      build_macos,
+      build_windows,
+      docs_check,
+      mypy_version_check,
+      pre-commit,
+      build_linux_online
+    ]
     runs-on: ubuntu-22.04
     # Discord notification can't handle schedule events
     if: (github.event_name != 'schedule')

@@ -361,7 +423,7 @@ jobs:
       python setup.py sdist bdist_wheel

     - name: Publish to PyPI (Test)
-      uses: pypa/gh-action-pypi-publish@v1.5.1
+      uses: pypa/gh-action-pypi-publish@v1.6.4
       if: (github.event_name == 'release')
       with:
         user: __token__

@@ -369,7 +431,7 @@ jobs:
         repository_url: https://test.pypi.org/legacy/

     - name: Publish to PyPI
-      uses: pypa/gh-action-pypi-publish@v1.5.1
+      uses: pypa/gh-action-pypi-publish@v1.6.4
       if: (github.event_name == 'release')
       with:
         user: __token__
(unnamed file)

@@ -15,9 +15,9 @@ repos:
     additional_dependencies:
       - types-cachetools==5.2.1
       - types-filelock==3.2.7
-      - types-requests==2.28.11.5
+      - types-requests==2.28.11.7
       - types-tabulate==0.9.0.0
-      - types-python-dateutil==2.8.19.4
+      - types-python-dateutil==2.8.19.5
     # stages: [push]

 - repo: https://github.com/pycqa/isort
(unnamed file)

@@ -1,6 +1,7 @@
 # ![freqtrade](https://raw.githubusercontent.com/freqtrade/freqtrade/develop/docs/assets/freqtrade_poweredby.svg)

 [![Freqtrade CI](https://github.com/freqtrade/freqtrade/workflows/Freqtrade%20CI/badge.svg)](https://github.com/freqtrade/freqtrade/actions/)
+[![DOI](https://joss.theoj.org/papers/10.21105/joss.04864/status.svg)](https://doi.org/10.21105/joss.04864)
 [![Coverage Status](https://coveralls.io/repos/github/freqtrade/freqtrade/badge.svg?branch=develop&service=github)](https://coveralls.io/github/freqtrade/freqtrade?branch=develop)
 [![Documentation](https://readthedocs.org/projects/freqtrade/badge/)](https://www.freqtrade.io)
 [![Maintainability](https://api.codeclimate.com/v1/badges/5737e6d668200b7518ff/maintainability)](https://codeclimate.com/github/freqtrade/freqtrade/maintainability)
(unnamed file)

@@ -7,11 +7,13 @@ export DOCKER_BUILDKIT=1
 TAG=$(echo "${BRANCH_NAME}" | sed -e "s/\//_/g")
 TAG_PLOT=${TAG}_plot
 TAG_FREQAI=${TAG}_freqai
+TAG_FREQAI_RL=${TAG_FREQAI}rl
 TAG_PI="${TAG}_pi"

 TAG_ARM=${TAG}_arm
 TAG_PLOT_ARM=${TAG_PLOT}_arm
 TAG_FREQAI_ARM=${TAG_FREQAI}_arm
+TAG_FREQAI_RL_ARM=${TAG_FREQAI_RL}_arm
 CACHE_IMAGE=freqtradeorg/freqtrade_cache

 echo "Running for ${TAG}"

@@ -41,9 +43,11 @@ docker tag freqtrade:$TAG_ARM ${CACHE_IMAGE}:$TAG_ARM

 docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_PLOT_ARM} -f docker/Dockerfile.plot .
 docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_FREQAI_ARM} -f docker/Dockerfile.freqai .
+docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_FREQAI_RL_ARM} -f docker/Dockerfile.freqai_rl .

 docker tag freqtrade:$TAG_PLOT_ARM ${CACHE_IMAGE}:$TAG_PLOT_ARM
 docker tag freqtrade:$TAG_FREQAI_ARM ${CACHE_IMAGE}:$TAG_FREQAI_ARM
+docker tag freqtrade:$TAG_FREQAI_RL_ARM ${CACHE_IMAGE}:$TAG_FREQAI_RL_ARM

 # Run backtest
 docker run --rm -v $(pwd)/config_examples/config_bittrex.example.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG_ARM} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3

@@ -58,6 +62,7 @@ docker images
 # docker push ${IMAGE_NAME}
 docker push ${CACHE_IMAGE}:$TAG_PLOT_ARM
 docker push ${CACHE_IMAGE}:$TAG_FREQAI_ARM
+docker push ${CACHE_IMAGE}:$TAG_FREQAI_RL_ARM
 docker push ${CACHE_IMAGE}:$TAG_ARM

 # Create multi-arch image

@@ -74,6 +79,9 @@ docker manifest push -p ${IMAGE_NAME}:${TAG_PLOT}
 docker manifest create ${IMAGE_NAME}:${TAG_FREQAI} ${CACHE_IMAGE}:${TAG_FREQAI_ARM} ${CACHE_IMAGE}:${TAG_FREQAI}
 docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI}

+docker manifest create ${IMAGE_NAME}:${TAG_FREQAI_RL} ${CACHE_IMAGE}:${TAG_FREQAI_RL_ARM} ${CACHE_IMAGE}:${TAG_FREQAI_RL}
+docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI_RL}
+
 # Tag as latest for develop builds
 if [ "${TAG}" = "develop" ]; then
     docker manifest create ${IMAGE_NAME}:latest ${CACHE_IMAGE}:${TAG_ARM} ${IMAGE_NAME}:${TAG_PI} ${CACHE_IMAGE}:${TAG}
(unnamed file)

@@ -6,6 +6,7 @@
 TAG=$(echo "${BRANCH_NAME}" | sed -e "s/\//_/g")
 TAG_PLOT=${TAG}_plot
 TAG_FREQAI=${TAG}_freqai
+TAG_FREQAI_RL=${TAG_FREQAI}rl
 TAG_PI="${TAG}_pi"

 PI_PLATFORM="linux/arm/v7"

@@ -51,9 +52,11 @@ docker tag freqtrade:$TAG ${CACHE_IMAGE}:$TAG

 docker build --cache-from freqtrade:${TAG} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG} -t freqtrade:${TAG_PLOT} -f docker/Dockerfile.plot .
 docker build --cache-from freqtrade:${TAG} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG} -t freqtrade:${TAG_FREQAI} -f docker/Dockerfile.freqai .
+docker build --cache-from freqtrade:${TAG_FREQAI} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_FREQAI} -t freqtrade:${TAG_FREQAI_RL} -f docker/Dockerfile.freqai_rl .

 docker tag freqtrade:$TAG_PLOT ${CACHE_IMAGE}:$TAG_PLOT
 docker tag freqtrade:$TAG_FREQAI ${CACHE_IMAGE}:$TAG_FREQAI
+docker tag freqtrade:$TAG_FREQAI_RL ${CACHE_IMAGE}:$TAG_FREQAI_RL

 # Run backtest
 docker run --rm -v $(pwd)/config_examples/config_bittrex.example.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3

@@ -68,6 +71,7 @@ docker images
 docker push ${CACHE_IMAGE}
 docker push ${CACHE_IMAGE}:$TAG_PLOT
 docker push ${CACHE_IMAGE}:$TAG_FREQAI
+docker push ${CACHE_IMAGE}:$TAG_FREQAI_RL
 docker push ${CACHE_IMAGE}:$TAG
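Once both scripts have pushed their architecture-specific tags, the multi-arch result can be checked with the standard manifest command. The tag below follows from `TAG_FREQAI_RL=${TAG_FREQAI}rl` with `TAG=develop`:

```bash
# Expect both amd64 and arm64 entries in the manifest output
docker manifest inspect freqtradeorg/freqtrade:develop_freqairl
```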
(unnamed file)

@@ -79,9 +79,7 @@
         "test_size": 0.33,
         "random_state": 1
     },
-    "model_training_parameters": {
-        "n_estimators": 1000
-    }
+    "model_training_parameters": {}
 },
 "bot_name": "",
 "force_entry_enable": true,
docker/Dockerfile.freqai_rl (new file, 8 lines)

@@ -0,0 +1,8 @@
ARG sourceimage=freqtradeorg/freqtrade
ARG sourcetag=develop_freqai
FROM ${sourceimage}:${sourcetag}

# Install dependencies
COPY requirements-freqai.txt requirements-freqai-rl.txt /freqtrade/

RUN pip install -r requirements-freqai-rl.txt --user --no-cache-dir
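A local build of this new image might look as follows, relying on the `ARG` defaults above (the output tag is an assumption following the naming scheme used by the publish scripts in this release):

```bash
# Layer the RL dependencies on top of the published freqai image
docker build -f docker/Dockerfile.freqai_rl -t freqtrade:develop_freqairl .
```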
(unnamed file)

@@ -100,3 +100,17 @@ freqtrade backtesting-analysis -c <config.json> --analysis-groups 0 2 --enter-re
 The indicators have to be present in your strategy's main DataFrame (either for your main
 timeframe or for informative timeframes) otherwise they will simply be ignored in the script
 output.
+
+### Filtering the trade output by date
+
+To show only trades between dates within your backtested timerange, supply the usual `timerange` option in `YYYYMMDD-[YYYYMMDD]` format:
+
+```
+--timerange : Timerange to filter output trades, start date inclusive, end date exclusive. e.g. 20220101-20221231
+```
+
+For example, if your backtest timerange was `20220101-20221231` but you only want to output trades in January:
+
+```bash
+freqtrade backtesting-analysis -c <config.json> --timerange 20220101-20220201
+```
(unnamed file)

@@ -583,7 +583,8 @@ To utilize this, you can append `--timeframe-detail 5m` to your regular backtest
 freqtrade backtesting --strategy AwesomeStrategy --timeframe 1h --timeframe-detail 5m
 ```

-This will load 1h data as well as 5m data for the timeframe. The strategy will be analyzed with the 1h timeframe - and for every "open trade candle" (candles where a trade is open) the 5m data will be used to simulate intra-candle movements.
+This will load 1h data as well as 5m data for the timeframe. The strategy will be analyzed with the 1h timeframe, and Entry orders will only be placed at the main timeframe, however Order fills and exit signals will be evaluated at the 5m candle, simulating intra-candle movements.
+
+All callback functions (`custom_exit()`, `custom_stoploss()`, ... ) will be running for each 5m candle once the trade is opened (so 12 times in the above example of 1h timeframe, and 5m detailed timeframe).

 `--timeframe-detail` must be smaller than the original timeframe, otherwise backtesting will fail to start.
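Given that callbacks now run on every detail candle, intra-candle exit logic becomes meaningful. A minimal sketch of such a callback, following the standard strategy-callback signature (the threshold and exit tag are illustrative, not part of the diff):

```python
from datetime import datetime
from typing import Optional

from freqtrade.persistence import Trade


def custom_exit(self, pair: str, trade: Trade, current_time: datetime,
                current_rate: float, current_profit: float, **kwargs) -> Optional[str]:
    # With --timeframe-detail 5m on a 1h main timeframe, this is evaluated once
    # per 5m candle while the trade is open (12 evaluations per 1h candle).
    if current_profit > 0.05:  # illustrative intra-candle take-profit level
        return "intra_candle_take_profit"
    return None
```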
(unnamed file)

@@ -5,7 +5,7 @@ You can analyze the results of backtests and trading history easily using Jupyte
 ## Quick start with docker

 Freqtrade provides a docker-compose file which starts up a jupyter lab server.
-You can run this server using the following command: `docker-compose -f docker/docker-compose-jupyter.yml up`
+You can run this server using the following command: `docker compose -f docker/docker-compose-jupyter.yml up`

 This will create a dockercontainer running jupyter lab, which will be accessible using `https://127.0.0.1:8888/lab`.
 Please use the link that's printed in the console after startup for simplified login.

@@ -83,7 +83,7 @@ from pathlib import Path
 project_root = "somedir/freqtrade"
 i=0
 try:
-    os.chdirdir(project_root)
+    os.chdir(project_root)
     assert Path('LICENSE').is_file()
 except:
     while i<4 and (not Path('LICENSE').is_file()):
(unnamed file)

@@ -49,6 +49,13 @@ For more information about the [Remote container extension](https://code.visuals
 New code should be covered by basic unittests. Depending on the complexity of the feature, Reviewers may request more in-depth unittests.
 If necessary, the Freqtrade team can assist and give guidance with writing good tests (however please don't expect anyone to write the tests for you).

+#### How to run tests
+
+Use `pytest` in root folder to run all available testcases and confirm your local environment is setup correctly
+
+!!! Note "feature branches"
+    Tests are expected to pass on the `develop` and `stable` branches. Other branches may be work in progress with tests not working yet.
+
 #### Checking log content in tests

 Freqtrade uses 2 main methods to check log content in tests, `log_has()` and `log_has_re()` (to check using regex, in case of dynamic log-messages).
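As a sketch of the workflow the new section describes (the module path and `-k` filter are examples, not part of the diff):

```bash
# Run the full test suite from the repository root
pytest

# Run a single test module, or filter by test name
pytest tests/test_configuration.py
pytest -k "log_has"
```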
(unnamed file)

@@ -4,20 +4,22 @@ This page explains how to run the bot with Docker. It is not meant to work out o

 ## Install Docker

-Start by downloading and installing Docker CE for your platform:
+Start by downloading and installing Docker / Docker Desktop for your platform:

 * [Mac](https://docs.docker.com/docker-for-mac/install/)
 * [Windows](https://docs.docker.com/docker-for-windows/install/)
 * [Linux](https://docs.docker.com/install/)

-To simplify running freqtrade, [`docker-compose`](https://docs.docker.com/compose/install/) should be installed and available to follow the below [docker quick start guide](#docker-quick-start).
+!!! Info "Docker compose install"
+    Freqtrade documentation assumes the use of Docker desktop (or the docker compose plugin).
+    While the docker-compose standalone installation still works, it will require changing all `docker compose` commands from `docker compose` to `docker-compose` to work (e.g. `docker compose up -d` will become `docker-compose up -d`).

-## Freqtrade with docker-compose
+## Freqtrade with docker

-Freqtrade provides an official Docker image on [Dockerhub](https://hub.docker.com/r/freqtradeorg/freqtrade/), as well as a [docker-compose file](https://github.com/freqtrade/freqtrade/blob/stable/docker-compose.yml) ready for usage.
+Freqtrade provides an official Docker image on [Dockerhub](https://hub.docker.com/r/freqtradeorg/freqtrade/), as well as a [docker compose file](https://github.com/freqtrade/freqtrade/blob/stable/docker-compose.yml) ready for usage.

 !!! Note
-    - The following section assumes that `docker` and `docker-compose` are installed and available to the logged in user.
+    - The following section assumes that `docker` is installed and available to the logged in user.
     - All below commands use relative directories and will have to be executed from the directory containing the `docker-compose.yml` file.

 ### Docker quick start

@@ -31,13 +33,13 @@ cd ft_userdata/
 curl https://raw.githubusercontent.com/freqtrade/freqtrade/stable/docker-compose.yml -o docker-compose.yml

 # Pull the freqtrade image
-docker-compose pull
+docker compose pull

 # Create user directory structure
-docker-compose run --rm freqtrade create-userdir --userdir user_data
+docker compose run --rm freqtrade create-userdir --userdir user_data

 # Create configuration - Requires answering interactive questions
-docker-compose run --rm freqtrade new-config --config user_data/config.json
+docker compose run --rm freqtrade new-config --config user_data/config.json
 ```

 The above snippet creates a new directory called `ft_userdata`, downloads the latest compose file and pulls the freqtrade image.

@@ -64,7 +66,7 @@ The `SampleStrategy` is run by default.
 Once this is done, you're ready to launch the bot in trading mode (Dry-run or Live-trading, depending on your answer to the corresponding question you made above).

 ``` bash
-docker-compose up -d
+docker compose up -d
 ```

 !!! Warning "Default configuration"

@@ -84,27 +86,27 @@ You can now access the UI by typing localhost:8080 in your browser.

 #### Monitoring the bot

-You can check for running instances with `docker-compose ps`.
+You can check for running instances with `docker compose ps`.
 This should list the service `freqtrade` as `running`. If that's not the case, best check the logs (see next point).

-#### Docker-compose logs
+#### Docker compose logs

 Logs will be written to: `user_data/logs/freqtrade.log`.
-You can also check the latest log with the command `docker-compose logs -f`.
+You can also check the latest log with the command `docker compose logs -f`.

 #### Database

 The database will be located at: `user_data/tradesv3.sqlite`

-#### Updating freqtrade with docker-compose
+#### Updating freqtrade with docker

-Updating freqtrade when using `docker-compose` is as simple as running the following 2 commands:
+Updating freqtrade when using `docker` is as simple as running the following 2 commands:

 ``` bash
 # Download the latest image
-docker-compose pull
+docker compose pull
 # Restart the image
-docker-compose up -d
+docker compose up -d
 ```

 This will first pull the latest image, and will then restart the container with the just pulled version.

@@ -116,43 +118,43 @@ This will first pull the latest image, and will then restart the container with

 Advanced users may edit the docker-compose file further to include all possible options or arguments.

-All freqtrade arguments will be available by running `docker-compose run --rm freqtrade <command> <optional arguments>`.
+All freqtrade arguments will be available by running `docker compose run --rm freqtrade <command> <optional arguments>`.

-!!! Warning "`docker-compose` for trade commands"
-    Trade commands (`freqtrade trade <...>`) should not be ran via `docker-compose run` - but should use `docker-compose up -d` instead.
+!!! Warning "`docker compose` for trade commands"
+    Trade commands (`freqtrade trade <...>`) should not be ran via `docker compose run` - but should use `docker compose up -d` instead.
     This makes sure that the container is properly started (including port forwardings) and will make sure that the container will restart after a system reboot.
     If you intend to use freqUI, please also ensure to adjust the [configuration accordingly](rest-api.md#configuration-with-docker), otherwise the UI will not be available.

-!!! Note "`docker-compose run --rm`"
+!!! Note "`docker compose run --rm`"
     Including `--rm` will remove the container after completion, and is highly recommended for all modes except trading mode (running with `freqtrade trade` command).

-??? Note "Using docker without docker-compose"
-    "`docker-compose run --rm`" will require a compose file to be provided.
+??? Note "Using docker without docker"
+    "`docker compose run --rm`" will require a compose file to be provided.
     Some freqtrade commands that don't require authentication such as `list-pairs` can be run with "`docker run --rm`" instead.
     For example `docker run --rm freqtradeorg/freqtrade:stable list-pairs --exchange binance --quote BTC --print-json`.
     This can be useful for fetching exchange information to add to your `config.json` without affecting your running containers.

-#### Example: Download data with docker-compose
+#### Example: Download data with docker

 Download backtesting data for 5 days for the pair ETH/BTC and 1h timeframe from Binance. The data will be stored in the directory `user_data/data/` on the host.

 ``` bash
-docker-compose run --rm freqtrade download-data --pairs ETH/BTC --exchange binance --days 5 -t 1h
+docker compose run --rm freqtrade download-data --pairs ETH/BTC --exchange binance --days 5 -t 1h
 ```

 Head over to the [Data Downloading Documentation](data-download.md) for more details on downloading data.

-#### Example: Backtest with docker-compose
+#### Example: Backtest with docker

 Run backtesting in docker-containers for SampleStrategy and specified timerange of historical data, on 5m timeframe:

 ``` bash
-docker-compose run --rm freqtrade backtesting --config user_data/config.json --strategy SampleStrategy --timerange 20190801-20191001 -i 5m
+docker compose run --rm freqtrade backtesting --config user_data/config.json --strategy SampleStrategy --timerange 20190801-20191001 -i 5m
 ```

 Head over to the [Backtesting Documentation](backtesting.md) to learn more.

-### Additional dependencies with docker-compose
+### Additional dependencies with docker

 If your strategy requires dependencies not included in the default image - it will be necessary to build the image on your host.
 For this, please create a Dockerfile containing installation steps for the additional dependencies (have a look at [docker/Dockerfile.custom](https://github.com/freqtrade/freqtrade/blob/develop/docker/Dockerfile.custom) for an example).

@@ -166,15 +168,15 @@ You'll then also need to modify the `docker-compose.yml` file and uncomment the
    dockerfile: "./Dockerfile.<yourextension>"
 ```

-You can then run `docker-compose build --pull` to build the docker image, and run it using the commands described above.
+You can then run `docker compose build --pull` to build the docker image, and run it using the commands described above.

-### Plotting with docker-compose
+### Plotting with docker

 Commands `freqtrade plot-profit` and `freqtrade plot-dataframe` ([Documentation](plotting.md)) are available by changing the image to `*_plot` in your docker-compose.yml file.
 You can then use these commands as follows:

 ``` bash
-docker-compose run --rm freqtrade plot-dataframe --strategy AwesomeStrategy -p BTC/ETH --timerange=20180801-20180805
+docker compose run --rm freqtrade plot-dataframe --strategy AwesomeStrategy -p BTC/ETH --timerange=20180801-20180805
 ```

 The output will be stored in the `user_data/plot` directory, and can be opened with any modern browser.

@@ -185,7 +187,7 @@ Freqtrade provides a docker-compose file which starts up a jupyter lab server.
 You can run this server using the following command:

 ``` bash
-docker-compose -f docker/docker-compose-jupyter.yml up
+docker compose -f docker/docker-compose-jupyter.yml up
 ```

 This will create a docker-container running jupyter lab, which will be accessible using `https://127.0.0.1:8888/lab`.

@@ -194,7 +196,7 @@ Please use the link that's printed in the console after startup for simplified l
 Since part of this image is built on your machine, it is recommended to rebuild the image from time to time to keep freqtrade (and dependencies) up-to-date.

 ``` bash
-docker-compose -f docker/docker-compose-jupyter.yml build --no-cache
+docker compose -f docker/docker-compose-jupyter.yml build --no-cache
 ```

 ## Troubleshooting
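The migration above assumes the compose plugin (`docker compose`) rather than the standalone `docker-compose` binary. A quick way to see which variant a host provides:

```bash
# Compose plugin (v2) - the syntax the updated documentation uses
docker compose version

# Standalone docker-compose (v1) - still works, but needs the hyphenated command form
docker-compose version
```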
(unnamed file)

@@ -54,6 +54,9 @@ This configuration enables kraken, as well as rate-limiting to avoid bans from t

 ## Binance

+!!! Warning "Server location and geo-ip restrictions"
+    Please be aware that binance restrict api access regarding the server country. The currents and non exhaustive countries blocked are United States, Malaysia (Singapour), Ontario (Canada). Please go to [binance terms > b. Eligibility](https://www.binance.com/en/terms) to find up to date list.
+
 Binance supports [time_in_force](configuration.md#understand-order_time_in_force).

 !!! Tip "Stoploss on Exchange"
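For reference, the kraken rate-limit configuration that the hunk context alludes to typically takes this shape (a sketch; the `ccxt_config` keys follow ccxt's naming and the exact limit value may differ from the shipped example):

```json
"exchange": {
    "name": "kraken",
    "key": "",
    "secret": "",
    "ccxt_config": {
        "enableRateLimit": true,
        "rateLimit": 3100
    }
}
```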
(unnamed file)

@@ -26,10 +26,7 @@ FreqAI is configured through the typical [Freqtrade config file](configuration.m
         },
         "data_split_parameters" : {
             "test_size": 0.25
         },
-        "model_training_parameters" : {
-            "n_estimators": 100
-        },
     }
 }
 ```

@@ -118,7 +115,7 @@ The FreqAI strategy requires including the following lines of code in the standa

     ```

-Notice how the `populate_any_indicators()` is where [features](freqai-feature-engineering.md#feature-engineering) and labels/targets are added. A full example strategy is available in `templates/FreqaiExampleStrategy.py`.
+Notice how the `populate_any_indicators()` is where [features](freqai-feature-engineering.md#feature-engineering) and labels/targets are added. A full example strategy is available in `templates/FreqaiExampleStrategy.py`.

 Notice also the location of the labels under `if set_generalized_indicators:` at the bottom of the example. This is where single features and labels/targets should be added to the feature set to avoid duplication of them from various configuration parameters that multiply the feature set, such as `include_timeframes`.

@@ -182,7 +179,7 @@ The `startup_candle_count` in the FreqAI strategy needs to be set up in the same

 ## Creating a dynamic target threshold

-Deciding when to enter or exit a trade can be done in a dynamic way to reflect current market conditions. FreqAI allows you to return additional information from the training of a model (more info [here](freqai-feature-engineering.md#returning-additional-info-from-training)). For example, the `&*_std/mean` return values describe the statistical distribution of the target/label *during the most recent training*. Comparing a given prediction to these values allows you to know the rarity of the prediction. In `templates/FreqaiExampleStrategy.py`, the `target_roi` and `sell_roi` are defined to be 1.25 z-scores away from the mean which causes predictions that are closer to the mean to be filtered out.
+Deciding when to enter or exit a trade can be done in a dynamic way to reflect current market conditions. FreqAI allows you to return additional information from the training of a model (more info [here](freqai-feature-engineering.md#returning-additional-info-from-training)). For example, the `&*_std/mean` return values describe the statistical distribution of the target/label *during the most recent training*. Comparing a given prediction to these values allows you to know the rarity of the prediction. In `templates/FreqaiExampleStrategy.py`, the `target_roi` and `sell_roi` are defined to be 1.25 z-scores away from the mean which causes predictions that are closer to the mean to be filtered out.

 ```python
 dataframe["target_roi"] = dataframe["&-s_close_mean"] + dataframe["&-s_close_std"] * 1.25

@@ -230,7 +227,7 @@ If you want to predict multiple targets, you need to define multiple labels usin

 #### Classifiers

-If you are using a classifier, you need to specify a target that has discrete values. FreqAI includes a variety of classifiers, such as the `CatboostClassifier` via the flag `--freqaimodel CatboostClassifier`. If you elects to use a classifier, the classes need to be set using strings. For example, if you want to predict if the price 100 candles into the future goes up or down you would set
+If you are using a classifier, you need to specify a target that has discrete values. FreqAI includes a variety of classifiers, such as the `CatboostClassifier` via the flag `--freqaimodel CatboostClassifier`. If you elects to use a classifier, the classes need to be set using strings. For example, if you want to predict if the price 100 candles into the future goes up or down you would set

 ```python
 df['&s-up_or_down'] = np.where( df["close"].shift(-100) > df["close"], 'up', 'down')
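The exit-side companion of the `target_roi` line quoted above mirrors it below the mean (a sketch; `set_thresholds` is a hypothetical helper name, while the column names come from the example strategy referenced in the hunk):

```python
import pandas as pd


def set_thresholds(dataframe: pd.DataFrame) -> pd.DataFrame:
    # Thresholds 1.25 z-scores above/below the label mean returned from training
    dataframe["target_roi"] = dataframe["&-s_close_mean"] + dataframe["&-s_close_std"] * 1.25
    dataframe["sell_roi"] = dataframe["&-s_close_mean"] - dataframe["&-s_close_std"] * 1.25
    return dataframe
```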
(unnamed file)

@@ -4,22 +4,30 @@ The table below will list all configuration parameters available for FreqAI. Som

 Mandatory parameters are marked as **Required** and have to be set in one of the suggested ways.

+### General configuration parameters
+
 | Parameter | Description |
 |------------|-------------|
-| | **General configuration parameters**
+| | **General configuration parameters within the `config.freqai` tree**
 | `freqai` | **Required.** <br> The parent dictionary containing all the parameters for controlling FreqAI. <br> **Datatype:** Dictionary.
 | `train_period_days` | **Required.** <br> Number of days to use for the training data (width of the sliding window). <br> **Datatype:** Positive integer.
 | `backtest_period_days` | **Required.** <br> Number of days to inference from the trained model before sliding the `train_period_days` window defined above, and retraining the model during backtesting (more info [here](freqai-running.md#backtesting)). This can be fractional days, but beware that the provided `timerange` will be divided by this number to yield the number of trainings necessary to complete the backtest. <br> **Datatype:** Float.
 | `identifier` | **Required.** <br> A unique ID for the current model. If models are saved to disk, the `identifier` allows for reloading specific pre-trained models/data. <br> **Datatype:** String.
 | `live_retrain_hours` | Frequency of retraining during dry/live runs. <br> **Datatype:** Float > 0. <br> Default: `0` (models retrain as often as possible).
 | `expiration_hours` | Avoid making predictions if a model is more than `expiration_hours` old. <br> **Datatype:** Positive integer. <br> Default: `0` (models never expire).
-| `purge_old_models` | Delete obsolete models. <br> **Datatype:** Boolean. <br> Default: `False` (all historic models remain on disk).
+| `purge_old_models` | Delete all unused models during live runs (not relevant to backtesting). If set to false (not default), dry/live runs will accumulate all unused models to disk. If <br> **Datatype:** Boolean. <br> Default: `True`.
 | `save_backtest_models` | Save models to disk when running backtesting. Backtesting operates most efficiently by saving the prediction data and reusing them directly for subsequent runs (when you wish to tune entry/exit parameters). Saving backtesting models to disk also allows to use the same model files for starting a dry/live instance with the same model `identifier`. <br> **Datatype:** Boolean. <br> Default: `False` (no models are saved).
 | `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)). <br> **Datatype:** Positive integer.
 | `follow_mode` | Use a `follower` that will look for models associated with a specific `identifier` and load those for inferencing. A `follower` will **not** train new models. <br> **Datatype:** Boolean. <br> Default: `False`.
 | `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)). <br> **Datatype:** Boolean. <br> Default: `False`.
 | `write_metrics_to_disk` | Collect train timings, inference timings and cpu usage in json file. <br> **Datatype:** Boolean. <br> Default: `False`
-| | **Feature parameters**
+| `data_kitchen_thread_count` | <br> Designate the number of threads you want to use for data processing (outlier methods, normalization, etc.). This has no impact on the number of threads used for training. If user does not set it (default), FreqAI will use max number of threads - 2 (leaving 1 physical core available for Freqtrade bot and FreqUI) <br> **Datatype:** Positive integer.
+
+### Feature parameters
+
+| Parameter | Description |
+|------------|-------------|
+| | **Feature parameters within the `freqai.feature_parameters` sub dictionary**
 | `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples are shown [here](freqai-feature-engineering.md). <br> **Datatype:** Dictionary.
 | `include_timeframes` | A list of timeframes that all indicators in `populate_any_indicators` will be created for. The list is added as features to the base indicators dataset. <br> **Datatype:** List of timeframes (strings).
 | `include_corr_pairlist` | A list of correlated coins that FreqAI will add as additional features to all `pair_whitelist` coins. All indicators set in `populate_any_indicators` during feature engineering (see details [here](freqai-feature-engineering.md)) will be created for each correlated coin. The correlated coins features are added to the base indicators dataset. <br> **Datatype:** List of assets (strings).

@@ -29,7 +37,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
 | `indicator_max_period_candles` | **No longer used (#7325)**. Replaced by `startup_candle_count` which is set in the [strategy](freqai-configuration.md#building-a-freqai-strategy). `startup_candle_count` is timeframe independent and defines the maximum *period* used in `populate_any_indicators()` for indicator creation. FreqAI uses this parameter together with the maximum timeframe in `include_time_frames` to calculate how many data points to download such that the first data point does not include a NaN. <br> **Datatype:** Positive integer.
 | `indicator_periods_candles` | Time periods to calculate indicators for. The indicators are added to the base indicator dataset. <br> **Datatype:** List of positive integers.
 | `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](#reducing-data-dimensionality-with-principal-component-analysis) <br> **Datatype:** Boolean. <br> Default: `False`.
-| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features. <br> **Datatype:** Integer. <br> Default: `0`.
+| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features. Plot is stored in `user_data/models/<identifier>/sub-train-<COIN>_<timestamp>.html`. <br> **Datatype:** Integer. <br> Default: `0`.
 | `DI_threshold` | Activates the use of the Dissimilarity Index for outlier detection when set to > 0. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Positive float (typically < 1).
 | `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training dataset, as well as from incoming data points. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Boolean.
 | `svm_params` | All parameters available in Sklearn's `SGDOneClassSVM()`. See details about some select parameters [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Dictionary.

@@ -38,16 +46,49 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
 | `noise_standard_deviation` | If set, FreqAI adds noise to the training features with the aim of preventing overfitting. FreqAI generates random deviates from a gaussian distribution with a standard deviation of `noise_standard_deviation` and adds them to all data points. `noise_standard_deviation` should be kept relative to the normalized space, i.e., between -1 and 1. In other words, since data in FreqAI is always normalized to be between -1 and 1, `noise_standard_deviation: 0.05` would result in 32% of the data being randomly increased/decreased by more than 2.5% (i.e., the percent of data falling within the first standard deviation). <br> **Datatype:** Integer. <br> Default: `0`.
 | `outlier_protection_percentage` | Enable to prevent outlier detection methods from discarding too much data. If more than `outlier_protection_percentage` % of points are detected as outliers by the SVM or DBSCAN, FreqAI will log a warning message and ignore outlier detection, i.e., the original dataset will be kept intact. If the outlier protection is triggered, no predictions will be made based on the training dataset. <br> **Datatype:** Float. <br> Default: `30`.
 | `reverse_train_test_order` | Split the feature dataset (see below) and use the latest data split for training and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, you should be careful to understand the unorthodox nature of this parameter before employing it. <br> **Datatype:** Boolean. <br> Default: `False` (no reversal).
-| | **Data split parameters**
+
+### Data split parameters
+
+| Parameter | Description |
+|------------|-------------|
+| | **Data split parameters within the `freqai.data_split_parameters` sub dictionary**
 | `data_split_parameters` | Include any additional parameters available from Scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website). <br> **Datatype:** Dictionary.
 | `test_size` | The fraction of data that should be used for testing instead of training. <br> **Datatype:** Positive float < 1.
 | `shuffle` | Shuffle the training data points during training. Typically, to not remove the chronological order of data in time-series forecasting, this is set to `False`. <br> **Datatype:** Boolean. <br> Defaut: `False`.
-| | **Model training parameters**
+
+### Model training parameters
+
+| Parameter | Description |
+|------------|-------------|
+| | **Model training parameters within the `freqai.model_training_parameters` sub dictionary**
 | `model_training_parameters` | A flexible dictionary that includes all parameters available by the selected model library. For example, if you use `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If you select a different model, this dictionary can contain any parameter from that model. A list of the currently available models can be found [here](freqai-configuration.md#using-different-prediction-models). <br> **Datatype:** Dictionary.
 | `n_estimators` | The number of boosted trees to fit in the training of the model. <br> **Datatype:** Integer.
 | `learning_rate` | Boosting learning rate during training of the model. <br> **Datatype:** Float.
 | `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names. <br> **Datatype:** Float.
+
+### Reinforcement Learning parameters
+
+| Parameter | Description |
+|------------|-------------|
+| | **Reinforcement Learning Parameters within the `freqai.rl_config` sub dictionary**
+| `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model. <br> **Datatype:** Dictionary.
+| `train_cycles` | Training time steps will be set based on the `train_cycles * number of training data points. <br> **Datatype:** Integer.
+| `cpu_count` | Number of processors to dedicate to the Reinforcement Learning training process. <br> **Datatype:** int.
+| `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the customizable `calculate_reward()` function. <br> **Datatype:** int.
+| `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentaiton. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website) <br> **Datatype:** string.
+| `policy_type` | One of the available policy types from stable_baselines3 <br> **Datatype:** string.
+| `max_training_drawdown_pct` | The maximum drawdown that the agent is allowed to experience during training. <br> **Datatype:** float. <br> Default: 0.8
+| `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on if `ReinforcementLearning_multiproc` is selected or not). Recommended to leave this untouched, by default, this value is set to the total number of physical cores minus 1. <br> **Datatype:** int.
+| `model_reward_parameters` | Parameters used inside the customizable `calculate_reward()` function in `ReinforcementLearner.py` <br> **Datatype:** int.
+| `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting. <br> **Datatype:** bool. <br> Default: `False`.
+| `net_arch` | Network architecture which is well described in [`stable_baselines3` doc](https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html#examples). In summary: `[<shared layers>, dict(vf=[<non-shared value network layers>], pi=[<non-shared policy network layers>])]`. By default this is set to `[128, 128]`, which defines 2 shared hidden layers with 128 units each.
+| `randomize_starting_position` | Randomize the starting point of each episode to avoid overfitting. <br> **Datatype:** bool. <br> Default: `False`.
+
+### Additional parameters
+
+| Parameter | Description |
+|------------|-------------|
 | | **Extraneous parameters**
-| `keras` | If the selected model makes use of Keras (typical for Tensorflow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards. <br> **Datatype:** Boolean. <br> Default: `False`.
-| `conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. <br> **Datatype:** Integer. <br> Default: `2`.
-| `reduce_df_footprint` | Recast all numeric columns to float32/int32, with the objective of reducing ram/disk usage and decreasing train/inference timing. This parameter is set in the main level of the Freqtrade configuration file (not inside FreqAI). <br> **Datatype:** Boolean. <br> Default: `False`.
+| `freqai.keras` | If the selected model makes use of Keras (typical for Tensorflow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards. <br> **Datatype:** Boolean. <br> Default: `False`.
+| `freqai.conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. <br> **Datatype:** Integer. <br> Default: `2`.
+| `freqai.reduce_df_footprint` | Recast all numeric columns to float32/int32, with the objective of reducing ram/disk usage and decreasing train/inference timing. This parameter is set in the main level of the Freqtrade configuration file (not inside FreqAI). <br> **Datatype:** Boolean. <br> Default: `False`.
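Taken together, the **Required** rows above imply a minimal `freqai` block along these lines (a sketch only; values are illustrative, `enabled` is the activation flag used elsewhere in the freqtrade configuration, and the empty `model_training_parameters` falls back to the model library's defaults as in the config example earlier in this diff):

```json
"freqai": {
    "enabled": true,
    "identifier": "example-model-v1",
    "train_period_days": 30,
    "backtest_period_days": 7,
    "feature_parameters": {
        "include_timeframes": ["5m", "1h"],
        "include_corr_pairlist": ["BTC/USDT", "ETH/USDT"],
        "indicator_periods_candles": [10, 20]
    },
    "data_split_parameters": {
        "test_size": 0.25
    },
    "model_training_parameters": {}
}
```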
286
docs/freqai-reinforcement-learning.md
Normal file
286
docs/freqai-reinforcement-learning.md
Normal file
|
@ -0,0 +1,286 @@
|
|||
# Reinforcement Learning
|
||||
|
||||
!!! Note "Installation size"
|
||||
Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?".
|
||||
Users who prefer docker should ensure they use the docker image appended with `_freqairl`.
|
||||
|
||||
## Background and terminology
|
||||
|
||||
### What is RL and why does FreqAI need it?
|
||||
|
||||
Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-a-custom-reward-function)). The reward is used to train weights in a neural network.
|
||||
|
||||
A second important component of the FreqAI RL implementation is the use of *state* information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live (this functionality is not available in backtesting). *FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployments.*
|
||||
|
||||
Reinforcement learning is a natural progression for FreqAI, since it adds a new layer of adaptivity and market reactivity that Classifiers and Regressors cannot match. However, Classifiers and Regressors have strengths that RL does not have such as robust predictions. Improperly trained RL agents may find "cheats" and "tricks" to maximize reward without actually winning any trades. For this reason, RL is more complex and demands a higher level of understanding than typical Classifiers and Regressors.
|
||||
|
||||
### The RL interface
|
||||
|
||||
With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv` as [shown below](#creating-a-custom-reward-function).
|
||||
|
||||
We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-a-custom-reward-function), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configuration settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely.
|
||||
|
||||
The framework is built on stable_baselines3 (torch) and OpenAI gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework. For the environment, it is inheriting from `gym.env` which means that it is necessary to write an entirely new environment in order to switch to a different library.
|
||||
|
||||
### Important considerations
|
||||
|
||||
As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL training environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks like `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, etc.) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world.
|
||||
|
||||
## Running Reinforcement Learning
|
||||
|
||||
Setting up and running a Reinforcement Learning model is the same as running a Regressor or Classifier. The same two flags, `--freqaimodel` and `--strategy`, must be defined on the command line:
|
||||
|
||||
```bash
|
||||
freqtrade trade --freqaimodel ReinforcementLearner --strategy MyRLStrategy --config config.json
|
||||
```
|
||||
|
||||
where `ReinforcementLearner` will use the templated `ReinforcementLearner` from `freqai/prediction_models/ReinforcementLearner` (or a custom user defined one located in `user_data/freqaimodels`). The strategy, on the other hand, follows the same base [feature engineering](freqai-feature-engineering.md) with `populate_any_indicators` as a typical Regressor:
|
||||
|
||||
```python
|
||||
def populate_any_indicators(
|
||||
self, pair, df, tf, informative=None, set_generalized_indicators=False
|
||||
):
|
||||
|
||||
if informative is None:
|
||||
informative = self.dp.get_pair_dataframe(pair, tf)
|
||||
|
||||
# first loop is automatically duplicating indicators for time periods
|
||||
for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]:
|
||||
|
||||
t = int(t)
|
||||
informative[f"%-{pair}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
|
||||
informative[f"%-{pair}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
|
||||
informative[f"%-{pair}adx-period_{t}"] = ta.ADX(informative, window=t)
|
||||
|
||||
# The following raw price values are necessary for RL models
|
||||
informative[f"%-{pair}raw_close"] = informative["close"]
|
||||
informative[f"%-{pair}raw_open"] = informative["open"]
|
||||
informative[f"%-{pair}raw_high"] = informative["high"]
|
||||
informative[f"%-{pair}raw_low"] = informative["low"]
|
||||
|
||||
indicators = [col for col in informative if col.startswith("%")]
|
||||
# This loop duplicates and shifts all indicators to add a sense of recency to data
|
||||
for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1):
|
||||
if n == 0:
|
||||
continue
|
||||
informative_shift = informative[indicators].shift(n)
|
||||
informative_shift = informative_shift.add_suffix("_shift-" + str(n))
|
||||
informative = pd.concat((informative, informative_shift), axis=1)
|
||||
|
||||
df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True)
|
||||
skip_columns = [
|
||||
(s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"]
|
||||
]
|
||||
df = df.drop(columns=skip_columns)
|
||||
|
||||
# Add generalized indicators here (because in live, it will call this
|
||||
# function to populate indicators during training). Notice how we ensure not to
|
||||
# add them multiple times
|
||||
if set_generalized_indicators:
|
||||
|
||||
# For RL, there are no direct targets to set. This is filler (neutral)
|
||||
# until the agent sends an action.
|
||||
df["&-action"] = 0
|
||||
|
||||
return df
|
||||
```

Most of the function remains the same as for a typical Regressor; however, the function above shows how the strategy must pass the raw price data to the agent so that it has access to raw OHLCV in the training environment:

```python
        # The following features are necessary for RL models
        informative[f"%-{pair}raw_close"] = informative["close"]
        informative[f"%-{pair}raw_open"] = informative["open"]
        informative[f"%-{pair}raw_high"] = informative["high"]
        informative[f"%-{pair}raw_low"] = informative["low"]
```

Finally, there is no explicit "label" to make - instead you need to assign the `&-action` column, which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action is set to 0. This value should align with the environment used - the FreqAI-provided base environments all use 0 as the neutral action.

Since there are no labels to set, the agent makes its "own" entry and exit decisions. This makes strategy construction rather simple: the entry and exit signals come from the agent in the form of an integer, which is used directly to decide entries and exits in the strategy:

```python
    def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:

        enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1]

        if enter_long_conditions:
            df.loc[
                reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"]
            ] = (1, "long")

        enter_short_conditions = [df["do_predict"] == 1, df["&-action"] == 3]

        if enter_short_conditions:
            df.loc[
                reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"]
            ] = (1, "short")

        return df

    def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
        exit_long_conditions = [df["do_predict"] == 1, df["&-action"] == 2]
        if exit_long_conditions:
            df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1

        exit_short_conditions = [df["do_predict"] == 1, df["&-action"] == 4]
        if exit_short_conditions:
            df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1

        return df
```

Keep in mind that the meaning of `&-action` depends on which environment you choose to use. The example above shows 5 actions, where 0 is neutral, 1 is enter long, 2 is exit long, 3 is enter short, and 4 is exit short.
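For reference, a minimal sketch of that mapping as a Python enum - the names mirror the `Actions` used in the reward examples further below, but treat this snippet as illustrative rather than the canonical definition:

```python
from enum import Enum


class Actions(Enum):
    # Integer actions assumed by the strategy snippets above (5-action environment)
    Neutral = 0
    Long_enter = 1
    Long_exit = 2
    Short_enter = 3
    Short_exit = 4
```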

## Configuring the Reinforcement Learner

In order to configure the Reinforcement Learner, the following dictionary must exist in the `freqai` config:

```json
    "rl_config": {
        "train_cycles": 25,
        "add_state_info": true,
        "max_trade_duration_candles": 300,
        "max_training_drawdown_pct": 0.02,
        "cpu_count": 8,
        "model_type": "PPO",
        "policy_type": "MlpPolicy",
        "model_reward_parameters": {
            "rr": 1,
            "profit_aim": 0.025
        }
    }
```

Parameter details can be found [here](freqai-parameter-table.md), but in general, `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environment to train weights in the model. `model_type` is a string that selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/) (external link).

!!! Note
    If you would like to experiment with `continual_learning`, then you should set that value to `true` in the main `freqai` configuration dictionary. This will tell the Reinforcement Learning library to continue training new models from the final state of previous models, instead of retraining new models from scratch each time a retrain is initiated.
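For example, a sketch of where that flag lives in the configuration (other required `freqai` keys omitted):

```json
    "freqai": {
        "enabled": true,
        "continual_learning": true
    }
```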

!!! Note
    Remember that the general `model_training_parameters` dictionary should contain all the model hyperparameter customizations for the particular `model_type`. For example, `PPO` parameters can be found [here](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html).
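As a sketch, such a block for `PPO` might look like the following - the parameter names come from stable-baselines3's `PPO`, but the values here are purely illustrative, not recommendations:

```json
    "model_training_parameters": {
        "learning_rate": 0.0003,
        "gamma": 0.99,
        "n_steps": 2048,
        "batch_size": 64
    }
```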

## Creating a custom reward function

As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `MyRLEnv` class (see below). A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards, but you are encouraged to create your own custom reinforcement learning model class (see below) and save it to `user_data/freqaimodels`. It is inside `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade and penalize it when it makes a losing trade. Or perhaps you wish to reward the agent for entering trades and penalize it for sitting in trades too long. Below we show examples of how these rewards are all calculated:

```python
from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions


class MyCoolRLModel(ReinforcementLearner):
    """
    User created RL prediction model.

    Save this file to `freqtrade/user_data/freqaimodels`

    then use it with:

    freqtrade trade --freqaimodel MyCoolRLModel --config config.json --strategy SomeCoolStrat

    Here the users can override any of the functions
    available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this
    is where the user overrides `MyRLEnv` (see below), to define custom
    `calculate_reward()` function, or to override any other parts of the environment.

    This class also allows users to override any other part of the IFreqaiModel tree.
    For example, the user can override `def fit()` or `def train()` or `def predict()`
    to take fine-tuned control over these processes.

    Another common override may be `def data_cleaning_predict()` where the user can
    take fine-tuned control over the data handling pipeline.
    """
    class MyRLEnv(Base5ActionRLEnv):
        """
        User made custom environment. This class inherits from BaseEnvironment and gym.env.
        Users can override any functions from those parent classes. Here is an example
        of a user customized `calculate_reward()` function.
        """
        def calculate_reward(self, action: int) -> float:
            # first, penalize if the action is not valid
            if not self._is_valid(action):
                return -2
            pnl = self.get_unrealized_profit()

            factor = 100
            # reward agent for entering trades
            if action in (Actions.Long_enter.value, Actions.Short_enter.value) \
                    and self._position == Positions.Neutral:
                return 25
            # discourage agent from not entering trades
            if action == Actions.Neutral.value and self._position == Positions.Neutral:
                return -1
            max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300)
            trade_duration = self._current_tick - self._last_trade_tick
            if trade_duration <= max_trade_duration:
                factor *= 1.5
            elif trade_duration > max_trade_duration:
                factor *= 0.5
            # discourage sitting in position
            if self._position in (Positions.Short, Positions.Long) and \
                    action == Actions.Neutral.value:
                return -1 * trade_duration / max_trade_duration
            # close long
            if action == Actions.Long_exit.value and self._position == Positions.Long:
                if pnl > self.profit_aim * self.rr:
                    factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
                return float(pnl * factor)
            # close short
            if action == Actions.Short_exit.value and self._position == Positions.Short:
                if pnl > self.profit_aim * self.rr:
                    factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
                return float(pnl * factor)
            return 0.
```

### Using Tensorboard

Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. Tensorboard is activated via the following command:

```bash
cd freqtrade
tensorboard --logdir user_data/models/unique-id
```

where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell to view the output in your browser at 127.0.0.1:6006 (6006 is the default port used by Tensorboard).
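For example, with the following snippet in the configuration (a sketch; other required `freqai` keys omitted), the command above would become `tensorboard --logdir user_data/models/example01`:

```json
    "freqai": {
        "identifier": "example01"
    }
```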

![tensorboard](assets/tensorboard.jpg)

### Custom logging

FreqAI also provides a built-in episodic summary logger called `self.tensorboard_log` for adding custom information to the Tensorboard log. By default, this function is already called once per step inside the environment to record the agent actions. All values accumulated for all steps in a single episode are reported at the conclusion of each episode, followed by a full reset of all metrics to 0 in preparation for the subsequent episode.

`self.tensorboard_log` can also be used anywhere inside the environment, for example, it can be added to the `calculate_reward` function to collect more detailed information about how often various parts of the reward were called:

```py
    class MyRLEnv(Base5ActionRLEnv):
        """
        User made custom environment. This class inherits from BaseEnvironment and gym.env.
        Users can override any functions from those parent classes. Here is an example
        of a user customized `calculate_reward()` function.
        """
        def calculate_reward(self, action: int) -> float:
            if not self._is_valid(action):
                self.tensorboard_log("is_valid")
                return -2
```

!!! Note
    The `self.tensorboard_log()` function is designed for tracking incremented objects only, i.e. events or actions inside the training environment. If the event of interest is a float, the float can be passed as the second argument, e.g. `self.tensorboard_log("float_metric1", 0.23)` would add 0.23 to `float_metric1`. In this case you can also disable incrementing using the `inc=False` parameter.
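A short sketch of both usages inside a custom environment, reusing the `pnl` value from the reward example above (the metric names are made up for illustration):

```python
# Count an event: adds 1 to the "long_entry_reward" counter for this episode
self.tensorboard_log("long_entry_reward")

# Track a float: per the note above, inc=False records the value as-is
# instead of adding it to a running counter
self.tensorboard_log("pnl_at_exit", pnl, inc=False)
```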

### Choosing a base environment

FreqAI provides three base environments, `Base3ActionRLEnv`, `Base4ActionRLEnv` and `Base5ActionRLEnv`. As the names imply, the environments are customized for agents that can select from 3, 4 or 5 actions. The `Base3ActionRLEnv` is the simplest: the agent can select from hold, long, or short. This environment can also be used for long-only bots (it automatically follows the `can_short` flag from the strategy), where long is the enter condition and short is the exit condition. Meanwhile, in the `Base4ActionRLEnv`, the agent can enter long, enter short, hold neutral, or exit position. Finally, in the `Base5ActionRLEnv`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. The main changes stemming from the environment selection include:

* the actions available in the `calculate_reward`
* the actions consumed by the user strategy

All of the FreqAI-provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-a-custom-reward-function)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. For more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`.

!!! Note
    Only the `Base3ActionRLEnv` can do long-only training/trading (set the user strategy attribute `can_short = False`).
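On the strategy side, long-only operation then only requires the corresponding attribute - a minimal sketch (the class name is illustrative):

```python
from freqtrade.strategy import IStrategy


class MyLongOnlyRLStrategy(IStrategy):
    # With Base3ActionRLEnv, can_short = False makes the "short" action
    # act as the exit signal, so the bot only ever holds long positions.
    can_short = False
```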

@@ -79,16 +79,11 @@ To change your **features**, you **must** set a new `identifier` in the config t

 To save the models generated during a particular backtest so that you can start a live deployment from one of them instead of training a new model, you must set `save_backtest_models` to `True` in the config.

-### Backtest live models
+### Backtest live collected predictions

-FreqAI allow you to reuse ready models through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse models generated in dry/run for comparison or other study. For that, you must set `"purge_old_models"` to `True` in the config.
+FreqAI allows you to reuse live historic predictions through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse predictions generated in dry/run for comparison or other study.

-The `--timerange` parameter must not be informed, as it will be automatically calculated through the training end dates of the models.
-
-Each model has an identifier derived from the training end date. If you have only 1 model trained, FreqAI will backtest from the training end date until the current date. If you have more than 1 model, each model will perform the backtesting according to the training end date until the training end date of the next model and so on. For the last model, the period of the previous model will be used for the execution.
-
-!!! Note
-    Currently, there is no checking for expired models, even if the `expired_hours` parameter is set.
+The `--timerange` parameter must not be informed, as it will be automatically calculated through the data in the historic predictions file.

 ### Downloading data to cover the full backtest period

@@ -72,11 +72,25 @@ pip install -r requirements-freqai.txt

 If you are using docker, a dedicated tag with FreqAI dependencies is available as `:freqai`. As such - you can replace the image line in your docker-compose file with `image: freqtradeorg/freqtrade:develop_freqai`. This image contains the regular FreqAI dependencies. Similar to native installs, Catboost will not be available on ARM based devices.

+### FreqAI position in open-source machine learning landscape
+
+Forecasting chaotic time-series based systems, such as equity/cryptocurrency markets, requires a broad set of tools geared toward testing a wide range of hypotheses. Fortunately, a recent maturation of robust machine learning libraries (e.g. `scikit-learn`) has opened up a wide range of research possibilities. Scientists from a diverse range of fields can now easily prototype their studies on an abundance of established machine learning algorithms. Similarly, these user-friendly libraries enable "citizen scientists" to use their basic Python skills for data exploration. However, leveraging these machine learning libraries on historical and live chaotic data sources can be logistically difficult and expensive. Additionally, robust data collection, storage, and handling presents a disparate challenge. [`FreqAI`](#freqai) aims to provide a generalized and extensible open-sourced framework geared toward live deployments of adaptive modeling for market forecasting. The `FreqAI` framework is effectively a sandbox for the rich world of open-source machine learning libraries. Inside the `FreqAI` sandbox, users find they can combine a wide variety of third-party libraries to test creative hypotheses on a free live 24/7 chaotic data source - cryptocurrency exchange data.
+
+### Citing FreqAI
+
+FreqAI is [published in the Journal of Open Source Software](https://joss.theoj.org/papers/10.21105/joss.04864). If you find FreqAI useful in your research, please use the following citation:
+
+```bibtex
+@article{Caulk2022,
+  doi = {10.21105/joss.04864},
+  url = {https://doi.org/10.21105/joss.04864},
+  year = {2022},
+  publisher = {The Open Journal},
+  volume = {7},
+  number = {80},
+  pages = {4864},
+  author = {Robert A. Caulk and Elin Törnquist and Matthias Voppichler and Andrew R. Lawless and Ryan McMullan and Wagner Costa Santos and Timothy C. Pogue and Johan van der Vlugt and Stefan P. Gehring and Pascal Schmidt},
+  title = {FreqAI: generalizing adaptive modeling for chaotic time-series market forecasts},
+  journal = {Journal of Open Source Software}
+}
+```

 ## Common pitfalls

 FreqAI cannot be combined with dynamic `VolumePairlists` (or any pairlist filter that adds and removes pairs dynamically).

@@ -99,6 +113,8 @@ Code review and software architecture brainstorming:

 Software development:
 Wagner Costa @wagnercosta
+Emre Suzen @aemr3
+Timothy Pogue @wizrds

 Beta testing and bug reporting:
-Stefan Gehring @bloodhunter4rc, @longyu, Andrew Lawless @paranoidandy, Pascal Schmidt @smidelis, Ryan McMullan @smarmau, Juha Nykänen @suikula, Johan van der Vlugt @jooopiert, Richárd Józsa @richardjosza, Timothy Pogue @wizrds
+Stefan Gehring @bloodhunter4rc, @longyu, Andrew Lawless @paranoidandy, Pascal Schmidt @smidelis, Ryan McMullan @smarmau, Juha Nykänen @suikula, Johan van der Vlugt @jooopiert, Richárd Józsa @richardjosza

@@ -23,6 +23,7 @@ You may also use something like `.*DOWN/BTC` or `.*UP/BTC` to exclude leveraged

 * [`StaticPairList`](#static-pair-list) (default, if not configured differently)
 * [`VolumePairList`](#volume-pair-list)
 * [`ProducerPairList`](#producerpairlist)
+* [`RemotePairList`](#remotepairlist)
 * [`AgeFilter`](#agefilter)
 * [`OffsetFilter`](#offsetfilter)
 * [`PerformanceFilter`](#performancefilter)

@@ -173,6 +174,48 @@ You can limit the length of the pairlist with the optional parameter `number_ass

 `ProducerPairList` can also be used multiple times in sequence, combining the pairs from multiple producers.
 Obviously, in such complex configurations, the Producer may not provide data for all pairs, so the strategy must be fit for this.

+#### RemotePairList
+
+It allows the user to fetch a pairlist from a remote server or a locally stored json file within the freqtrade directory, enabling dynamic updates and customization of the trading pairlist.
+
+The RemotePairList is defined in the pairlists section of the configuration settings. It uses the following configuration options:
+
+```json
+"pairlists": [
+    {
+        "method": "RemotePairList",
+        "pairlist_url": "https://example.com/pairlist",
+        "number_assets": 10,
+        "refresh_period": 1800,
+        "keep_pairlist_on_failure": true,
+        "read_timeout": 60,
+        "bearer_token": "my-bearer-token"
+    }
+]
+```
+
+The `pairlist_url` option specifies the URL of the remote server where the pairlist is located, or the path to a local file (if file:/// is prepended). This allows the user to use either a remote server or a local file as the source for the pairlist.
+
+The user is responsible for providing a server or local file that returns a JSON object with the following structure:
+
+```json
+{
+    "pairs": ["XRP/USDT", "ETH/USDT", "LTC/USDT"],
+    "refresh_period": 1800
+}
+```
+
+The `pairs` property should contain a list of strings with the trading pairs to be used by the bot. The `refresh_period` property is optional and specifies the number of seconds that the pairlist should be cached before being refreshed.
+
+The optional `keep_pairlist_on_failure` specifies whether the previously received pairlist should be used if the remote server is not reachable or returns an error. The default value is true.
+
+The optional `read_timeout` specifies the maximum amount of time (in seconds) to wait for a response from the remote source. The default value is 60.
+
+The optional `bearer_token` will be included in the requests Authorization Header.
+
+!!! Note
+    In case of a server error, the last received pairlist will be kept if `keep_pairlist_on_failure` is set to true; when set to false, an empty pairlist is returned.
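As a quick illustration of what the bot would request (the URL and token are the placeholders from the example above), you can verify such an endpoint manually:

```bash
curl -H "Authorization: Bearer my-bearer-token" https://example.com/pairlist
```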

 #### AgeFilter

 Removes pairs that have been listed on the exchange for less than `min_days_listed` days (defaults to `10`) or more than `max_days_listed` days (defaults to `None`, meaning infinity).
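A configuration sketch using the parameters described above (the values and the preceding generating pairlist are illustrative):

```json
"pairlists": [
    {"method": "StaticPairList"},
    {
        "method": "AgeFilter",
        "min_days_listed": 10,
        "max_days_listed": 100
    }
]
```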

@@ -1,6 +1,7 @@

 ![freqtrade](assets/freqtrade_poweredby.svg)

 [![Freqtrade CI](https://github.com/freqtrade/freqtrade/workflows/Freqtrade%20CI/badge.svg)](https://github.com/freqtrade/freqtrade/actions/)
+[![DOI](https://joss.theoj.org/papers/10.21105/joss.04864/status.svg)](https://doi.org/10.21105/joss.04864)
 [![Coverage Status](https://coveralls.io/repos/github/freqtrade/freqtrade/badge.svg?branch=develop&service=github)](https://coveralls.io/github/freqtrade/freqtrade?branch=develop)
 [![Maintainability](https://api.codeclimate.com/v1/badges/5737e6d668200b7518ff/maintainability)](https://codeclimate.com/github/freqtrade/freqtrade/maintainability)

@@ -11,9 +11,6 @@

 {% endif %}
 <div class="md-sidebar md-sidebar--primary" data-md-component="sidebar" data-md-type="navigation" {{ hidden }}>
   <div class="md-sidebar__scrollwrap">
-    <div id="widget-wrapper">
-
-    </div>
     <div class="md-sidebar__inner">
       {% include "partials/nav.html" %}
     </div>

@@ -44,25 +41,4 @@

 <script src="https://code.jquery.com/jquery-3.4.1.min.js"
   integrity="sha256-CSXorXvZcTkaix6Yvo6HppcZGetbYMGWSFlBw8HfCJo=" crossorigin="anonymous"></script>

-<!-- Load binance SDK -->
-<script async defer src="https://public.bnbstatic.com/static/js/broker-sdk/broker-sdk@1.0.0.min.js"></script>
-
-<script>
-  window.onload = function () {
-    var sidebar = document.getElementById('widget-wrapper')
-    var newDiv = document.createElement("div");
-    newDiv.id = "widget";
-    try {
-      sidebar.prepend(newDiv);
-
-      window.binanceBrokerPortalSdk.initBrokerSDK('#widget', {
-        apiHost: 'https://www.binance.com',
-        brokerId: 'R4BD3S82',
-        slideTime: 4e4,
-      });
-    } catch(err) {
-      console.log(err)
-    }
-  }
-</script>
 {% endblock %}

@@ -1,6 +1,6 @@

 markdown==3.3.7
 mkdocs==1.4.2
-mkdocs-material==8.5.10
+mkdocs-material==8.5.11
 mdx_truly_sane_lists==1.3
-pymdown-extensions==9.8
+pymdown-extensions==9.9
 jinja2==3.1.2

@@ -13,12 +13,12 @@ Feel free to use a visual Database editor like SqliteBrowser if you feel more co

 sudo apt-get install sqlite3
 ```

-### Using sqlite3 via docker-compose
+### Using sqlite3 via docker

 The freqtrade docker image does contain sqlite3, so you can edit the database without having to install anything on the host system.

 ``` bash
-docker-compose exec freqtrade /bin/bash
+docker compose exec freqtrade /bin/bash
 sqlite3 <database-file>.sqlite
 ```

@@ -773,7 +773,7 @@ class DigDeeperStrategy(IStrategy):

 * Sell 100@10\$ -> Avg price: 8.5\$, realized profit 150\$, 17.65%
 * Buy 150@11\$ -> Avg price: 10\$, realized profit 150\$, 17.65%
 * Sell 100@12\$ -> Avg price: 10\$, total realized profit 350\$, 20%
-* Sell 150@14\$ -> Avg price: 10\$, total realized profit 950\$, 40%
+* Sell 150@14\$ -> Avg price: 10\$, total realized profit 950\$, 40% <- *This will be the last "Exit" message*

 The total profit for this trade was 950\$ on a 3350\$ investment (`100@8$ + 100@9$ + 150@11$`). As such - the final relative profit is 28.35% (`950 / 3350`).
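A quick sanity check of that arithmetic:

```python
# Entries: 100@8$, 100@9$, 150@11$ -> total invested 3350$
invested = 100 * 8 + 100 * 9 + 150 * 11
total_profit = 950
print(invested, round(total_profit / invested, 4))  # 3350 0.2836 -> ~28.35%
```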

@@ -363,9 +363,9 @@ class AwesomeStrategy(IStrategy):

     timeframe = "1d"
     timeframe_mins = timeframe_to_minutes(timeframe)
     minimal_roi = {
-        "0": 0.05,                          # 5% for the first 3 candles
-        str(timeframe_mins * 3)): 0.02,     # 2% after 3 candles
-        str(timeframe_mins * 6)): 0.01,     # 1% After 6 candles
+        "0": 0.05,                          # 5% for the first 3 candles
+        str(timeframe_mins * 3): 0.02,      # 2% after 3 candles
+        str(timeframe_mins * 6): 0.01,      # 1% After 6 candles
     }
 ```

@@ -989,38 +989,18 @@ from freqtrade.persistence import Trade

 The following example queries for the current pair and trades from today, however other filters can easily be added.

 ``` python
 if self.config['runmode'].value in ('live', 'dry_run'):
-    trades = Trade.get_trades([Trade.pair == metadata['pair'],
-                               Trade.open_date > datetime.utcnow() - timedelta(days=1),
-                               Trade.is_open.is_(False),
-                               ]).order_by(Trade.close_date).all()
-    # Summarize profit for this pair.
-    curdayprofit = sum(trade.close_profit for trade in trades)
+    trades = Trade.get_trades_proxy(pair=metadata['pair'],
+                                    open_date=datetime.now(timezone.utc) - timedelta(days=1),
+                                    is_open=False)
+    # Summarize profit for this pair.
+    curdayprofit = sum(trade.close_profit for trade in trades)
 ```

-Get amount of stake_currency currently invested in Trades:
-
-``` python
-if self.config['runmode'].value in ('live', 'dry_run'):
-    total_stakes = Trade.total_open_trades_stakes()
-```
-
-Retrieve performance per pair.
-Returns a List of dicts per pair.
-
-``` python
-if self.config['runmode'].value in ('live', 'dry_run'):
-    performance = Trade.get_overall_performance()
-```
-
-Sample return value: ETH/BTC had 5 trades, with a total profit of 1.5% (ratio of 0.015).
-
-``` json
-{"pair": "ETH/BTC", "profit": 0.015, "count": 5}
-```
+For a full list of available methods, please consult the [Trade object](trade-object.md) documentation.

 !!! Warning
-    Trade history is not available during backtesting or hyperopt.
+    Trade history is not available in `populate_*` methods during backtesting or hyperopt, and will result in empty results.

 ## Prevent trades from happening for a specific pair

@@ -2,12 +2,37 @@

 Debugging a strategy can be time-consuming. Freqtrade offers helper functions to visualize raw data.
 The following assumes you work with SampleStrategy, data for 5m timeframe from Binance and have downloaded them into the data directory in the default location.
 Please follow the [documentation](https://www.freqtrade.io/en/stable/data-download/) for more details.

+## Setup
+
+### Change Working directory to repository root
+
+```python
+import os
+from pathlib import Path
+
+# Change directory
+# Modify this cell to ensure that the output shows the correct path.
+# Define all paths relative to the project root shown in the cell output
+project_root = "somedir/freqtrade"
+i = 0
+try:
+    os.chdir(project_root)
+    assert Path('LICENSE').is_file()
+except:
+    while i < 4 and (not Path('LICENSE').is_file()):
+        os.chdir(Path(Path.cwd(), '../'))
+        i += 1
+    project_root = Path.cwd()
+print(Path.cwd())
+```
+
+### Configure Freqtrade environment

 ```python
 from freqtrade.configuration import Configuration

 # Customize these according to your needs.

@@ -15,14 +40,14 @@ from freqtrade.configuration import Configuration

 # Initialize empty configuration object
 config = Configuration.from_files([])
 # Optionally (recommended), use existing configuration file
-# config = Configuration.from_files(["config.json"])
+# config = Configuration.from_files(["user_data/config.json"])

 # Define some constants
 config["timeframe"] = "5m"
 # Name of the strategy class
 config["strategy"] = "SampleStrategy"
 # Location of the data
-data_location = config['datadir']
+data_location = config["datadir"]
 # Pair to analyze - Only use one pair here
 pair = "BTC/USDT"
 ```

@@ -36,12 +61,12 @@ from freqtrade.enums import CandleType

 candles = load_pair_history(datadir=data_location,
                             timeframe=config["timeframe"],
                             pair=pair,
-                            data_format = "hdf5",
+                            data_format = "json",  # Make sure to update this to your data
                             candle_type=CandleType.SPOT,
                             )

 # Confirm success
-print("Loaded " + str(len(candles)) + f" rows of data for {pair} from {data_location}")
+print(f"Loaded {len(candles)} rows of data for {pair} from {data_location}")
 candles.head()
 ```

@@ -232,7 +257,7 @@ graph = generate_candlestick_graph(pair=pair,

 # Show graph inline
 # graph.show()

-# Render graph in a seperate window
+# Render graph in a separate window
 graph.show(renderer="browser")
 ```

@@ -11,18 +11,3 @@

 .rst-versions .rst-other-versions {
     color: white;
 }
-
-#widget-wrapper {
-    height: calc(220px * 0.5625 + 18px);
-    width: 220px;
-    margin: 0 auto 16px auto;
-    border-style: solid;
-    border-color: var(--md-code-bg-color);
-    border-width: 1px;
-    border-radius: 5px;
-}
-
-@media screen and (max-width: calc(76.25em - 1px)) {
-    #widget-wrapper { display: none; }
-}

148
docs/trade-object.md
Normal file

@@ -0,0 +1,148 @@

# Trade Object

## Trade

A position freqtrade enters is stored in a `Trade` object - which is persisted to the database.
It's a core concept of freqtrade - and something you'll come across in many sections of the documentation, which will most likely point you to this location.

It will be passed to the strategy in many [strategy callbacks](strategy-callbacks.md). The object passed to the strategy cannot be modified directly. Indirect modifications may occur based on callback results.

## Trade - Available attributes

The following attributes / properties are available for each individual trade - and can be used with `trade.<property>` (e.g. `trade.pair`).

| Attribute | DataType | Description |
|------------|-------------|-------------|
`pair`| string | Pair of this trade
`is_open`| boolean | Is the trade currently open, or has it been concluded
`open_rate`| float | Rate this trade was entered at (Avg. entry rate in case of trade-adjustments)
`close_rate`| float | Close rate - only set when is_open = False
`stake_amount`| float | Amount in Stake (or Quote) currency.
`amount`| float | Amount in Asset / Base currency that is currently owned.
`open_date`| datetime | Timestamp when trade was opened **use `open_date_utc` instead**
`open_date_utc`| datetime | Timestamp when trade was opened - in UTC
`close_date`| datetime | Timestamp when trade was closed **use `close_date_utc` instead**
`close_date_utc`| datetime | Timestamp when trade was closed - in UTC
`close_profit`| float | Relative profit at the time of trade closure. `0.01` == 1%
`close_profit_abs`| float | Absolute profit (in stake currency) at the time of trade closure.
`leverage` | float | Leverage used for this trade - defaults to 1.0 in spot markets.
`enter_tag`| string | Tag provided on entry via the `enter_tag` column in the dataframe
`is_short` | boolean | True for short trades, False otherwise
`orders` | Order[] | List of order objects attached to this trade (includes both filled and cancelled orders)
`date_last_filled_utc` | datetime | Time of the last filled order
`entry_side` | "buy" / "sell" | Order Side the trade was entered
`exit_side` | "buy" / "sell" | Order Side that will result in a trade exit / position reduction.
`trade_direction` | "long" / "short" | Trade direction in text - long or short.
`nr_of_successful_entries` | int | Number of successful (filled) entry orders
`nr_of_successful_exits` | int | Number of successful (filled) exit orders

## Class methods

The following are class methods - which return generic information, and usually result in an explicit query against the database.
They can be used as `Trade.<method>` - e.g. `open_trades = Trade.get_open_trade_count()`

!!! Warning "Backtesting/hyperopt"
    Most methods will work in both backtesting / hyperopt and live/dry modes.
    During backtesting, it's limited to usage in [strategy callbacks](strategy-callbacks.md). Usage in `populate_*()` methods is not supported and will produce wrong results.

### get_trades_proxy

When your strategy needs some information on existing (open or closed) trades - it's best to use `Trade.get_trades_proxy()`.

Usage:

``` python
from freqtrade.persistence import Trade
from datetime import timedelta

# ...
trade_hist = Trade.get_trades_proxy(pair='ETH/USDT', is_open=False, open_date=current_date - timedelta(days=2))
```

`get_trades_proxy()` supports the following keyword arguments. All arguments are optional - calling `get_trades_proxy()` without arguments will return a list of all trades in the database.

* `pair` e.g. `pair='ETH/USDT'`
* `is_open` e.g. `is_open=False`
* `open_date` e.g. `open_date=current_date - timedelta(days=2)`
* `close_date` e.g. `close_date=current_date - timedelta(days=5)`

### get_open_trade_count

Get the number of currently open trades

``` python
from freqtrade.persistence import Trade
# ...
open_trades = Trade.get_open_trade_count()
```

### get_total_closed_profit

Retrieve the total profit the bot has generated so far.
Aggregates `close_profit_abs` for all closed trades.

``` python
from freqtrade.persistence import Trade

# ...
profit = Trade.get_total_closed_profit()
```

### total_open_trades_stakes

Retrieve the total stake_amount that's currently in trades.

``` python
from freqtrade.persistence import Trade

# ...
profit = Trade.total_open_trades_stakes()
```

### get_overall_performance

Retrieve the overall performance - similar to the `/performance` telegram command.

``` python
from freqtrade.persistence import Trade

# ...
if self.config['runmode'].value in ('live', 'dry_run'):
    performance = Trade.get_overall_performance()
```

Sample return value: ETH/BTC had 5 trades, with a total profit of 1.5% (ratio of 0.015).

``` json
{"pair": "ETH/BTC", "profit": 0.015, "count": 5}
```

## Order Object

An `Order` object represents an order on the exchange (or a simulated order in dry-run mode).
An `Order` object will always be tied to its corresponding [`Trade`](#trade-object), and only really makes sense in the context of a trade.

### Order - Available attributes

An Order object is typically attached to a trade.
Most properties here can be None as they are dependent on the exchange response.

| Attribute | DataType | Description |
|------------|-------------|-------------|
`trade` | Trade | Trade object this order is attached to
`ft_pair` | string | Pair this order is for
`ft_is_open` | boolean | Is the order currently open (not yet fully filled or cancelled)?
`order_type` | string | Order type as defined on the exchange - usually market, limit or stoploss
`status` | string | Status as defined by ccxt. Usually open, closed, expired or canceled
`side` | string | Buy or Sell
`price` | float | Price the order was placed at
`average` | float | Average price the order filled at
`amount` | float | Amount in base currency
`filled` | float | Filled amount (in base currency)
`remaining` | float | Remaining amount
`cost` | float | Cost of the order - usually average * filled
`order_date` | datetime | Order creation date **use `order_date_utc` instead**
`order_date_utc` | datetime | Order creation date (in UTC)
`order_fill_date` | datetime | Order fill date **use `order_fill_date_utc` instead**
`order_fill_date_utc` | datetime | Order fill date
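As a rough sketch of how the two objects fit together (assuming `trade` was received in a strategy callback), the attached orders can be inspected like this:

```python
# Summarize every filled order attached to a trade.
# Attribute names follow the tables above; `trade` comes from a strategy callback.
for order in trade.orders:
    if not order.ft_is_open and order.filled:
        print(f"{order.side} {order.filled} {order.ft_pair} @ {order.average} ({order.status})")
```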

@@ -6,14 +6,14 @@ To update your freqtrade installation, please use one of the below methods, corr

 Breaking changes / changed behavior will be documented in the changelog that is posted alongside every release.
 For the develop branch, please follow PR's to avoid being surprised by changes.

-## docker-compose
+## docker

 !!! Note "Legacy installations using the `master` image"
     We're switching from master to stable for the release Images - please adjust your docker-file and replace `freqtradeorg/freqtrade:master` with `freqtradeorg/freqtrade:stable`

 ``` bash
-docker-compose pull
-docker-compose up -d
+docker compose pull
+docker compose up -d
 ```

 ## Installation via setup script

@@ -652,7 +652,7 @@ Common arguments:

 You can also use webserver mode via docker.
 Starting a one-off container requires the configuration of the port explicitly, as ports are not exposed by default.
-You can use `docker-compose run --rm -p 127.0.0.1:8080:8080 freqtrade webserver` to start a one-off container that'll be removed once you stop it. This assumes that port 8080 is still available and no other bot is running on that port.
+You can use `docker compose run --rm -p 127.0.0.1:8080:8080 freqtrade webserver` to start a one-off container that'll be removed once you stop it. This assumes that port 8080 is still available and no other bot is running on that port.

 Alternatively, you can reconfigure the docker-compose file to have the command updated:

@@ -662,7 +662,7 @@ Alternatively, you can reconfigure the docker-compose file to have the command u

     --config /freqtrade/user_data/config.json
 ```

-You can now use `docker-compose up` to start the webserver.
+You can now use `docker compose up` to start the webserver.
 This assumes that the configuration has a webserver enabled and configured for docker (listening port = `0.0.0.0`).

 !!! Tip

@@ -722,6 +722,7 @@ usage: freqtrade backtesting-analysis [-h] [-v] [--logfile FILE] [-V]

                                       [--enter-reason-list ENTER_REASON_LIST [ENTER_REASON_LIST ...]]
                                       [--exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]]
                                       [--indicator-list INDICATOR_LIST [INDICATOR_LIST ...]]
+                                      [--timerange YYYYMMDD-[YYYYMMDD]]

 optional arguments:
   -h, --help            show this help message and exit

@@ -744,6 +745,10 @@ optional arguments:

   --indicator-list INDICATOR_LIST [INDICATOR_LIST ...]
                         Comma separated list of indicators to analyse. e.g.
                         'close,rsi,bb_lowerband,profit_abs'
+  --timerange YYYYMMDD-[YYYYMMDD]
+                        Timerange to filter trades for analysis,
+                        start inclusive, end exclusive. e.g.
+                        20220101-20220201

 Common arguments:
   -v, --verbose         Verbose mode (-vv for more, -vvv to get all messages).

@@ -1,5 +1,5 @@

 """ Freqtrade bot """
-__version__ = '2022.11'
+__version__ = '2022.12'

 if 'dev' in __version__:
     try:

@@ -60,10 +60,4 @@ def start_analysis_entries_exits(args: Dict[str, Any]) -> None:

     logger.info('Starting freqtrade in analysis mode')

-    process_entry_exit_reasons(config['exportfilename'],
-                               config['exchange']['pair_whitelist'],
-                               config['analysis_groups'],
-                               config['enter_reason_list'],
-                               config['exit_reason_list'],
-                               config['indicator_list']
-                               )
+    process_entry_exit_reasons(config)

@@ -106,7 +106,7 @@ ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperop

                       "disableparamexport", "backtest_breakdown"]

 ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason_list",
-                              "exit_reason_list", "indicator_list"]
+                              "exit_reason_list", "indicator_list", "timerange"]

 NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes",
                     "list-markets", "list-pairs", "list-strategies", "list-freqaimodels",

@@ -355,6 +355,13 @@ def _validate_freqai_include_timeframes(conf: Dict[str, Any]) -> None:

             f"Main timeframe of {main_tf} must be smaller or equal to FreqAI "
             f"`include_timeframes`.Offending include-timeframes: {', '.join(offending_lines)}")

+    # Ensure that the base timeframe is included in the include_timeframes list
+    if main_tf not in freqai_include_timeframes:
+        feature_parameters = conf.get('freqai', {}).get('feature_parameters', {})
+        include_timeframes = [main_tf] + freqai_include_timeframes
+        conf.get('freqai', {}).get('feature_parameters', {}) \
+            .update({**feature_parameters, 'include_timeframes': include_timeframes})

 def _validate_freqai_backtest(conf: Dict[str, Any]) -> None:
     if conf.get('runmode', RunMode.OTHER) == RunMode.BACKTEST:

@@ -462,6 +462,9 @@ class Configuration:

         self._args_to_config(config, argname='indicator_list',
                              logstring='Analysis indicator list: {}')

+        self._args_to_config(config, argname='timerange',
+                             logstring='Filter trades by timerange: {}')

     def _process_runmode(self, config: Config) -> None:

         self._args_to_config(config, argname='dry_run',

@@ -31,7 +31,7 @@ HYPEROPT_LOSS_BUILTIN = ['ShortTradeDurHyperOptLoss', 'OnlyProfitHyperOptLoss',

                          'CalmarHyperOptLoss',
                          'MaxDrawDownHyperOptLoss', 'MaxDrawDownRelativeHyperOptLoss',
                          'ProfitDrawDownHyperOptLoss']
-AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList', 'ProducerPairList',
+AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList', 'ProducerPairList', 'RemotePairList',
                        'AgeFilter', 'OffsetFilter', 'PerformanceFilter',
                        'PrecisionFilter', 'PriceFilter', 'RangeStabilityFilter',
                        'ShuffleFilter', 'SpreadFilter', 'VolatilityFilter']

@@ -61,6 +61,7 @@ USERPATH_FREQAIMODELS = 'freqaimodels'

 TELEGRAM_SETTING_OPTIONS = ['on', 'off', 'silent']
 WEBHOOK_FORMAT_OPTIONS = ['form', 'json', 'raw']
+FULL_DATAFRAME_THRESHOLD = 100

 ENV_VAR_PREFIX = 'FREQTRADE__'

@@ -578,9 +579,27 @@ CONF_SCHEMA = {

                 },
             },
             "model_training_parameters": {
-                "type": "object",
-                "properties": {
-                    "n_estimators": {"type": "integer", "default": 1000}
-                },
+                "type": "object"
+            },
+            "rl_config": {
+                "type": "object",
+                "properties": {
+                    "train_cycles": {"type": "integer"},
+                    "max_trade_duration_candles": {"type": "integer"},
+                    "add_state_info": {"type": "boolean", "default": False},
+                    "max_training_drawdown_pct": {"type": "number", "default": 0.02},
+                    "cpu_count": {"type": "integer", "default": 1},
+                    "model_type": {"type": "string", "default": "PPO"},
+                    "policy_type": {"type": "string", "default": "MlpPolicy"},
+                    "net_arch": {"type": "array", "default": [128, 128]},
+                    "randomize_startinng_position": {"type": "boolean", "default": False},
+                    "model_reward_parameters": {
+                        "type": "object",
+                        "properties": {
+                            "rr": {"type": "number", "default": 1},
+                            "profit_aim": {"type": "number", "default": 0.025}
+                        }
+                    }
+                },
             },

@@ -590,9 +609,8 @@ CONF_SCHEMA = {

             "backtest_period_days",
             "identifier",
             "feature_parameters",
-            "data_split_parameters",
-            "model_training_parameters"
-        ]
+            "data_split_parameters"
+        ]
         },
     },
}

@@ -20,8 +20,8 @@ from freqtrade.persistence import LocalTrade, Trade, init_db

 logger = logging.getLogger(__name__)

 # Newest format
-BT_DATA_COLUMNS = ['pair', 'stake_amount', 'amount', 'open_date', 'close_date',
-                   'open_rate', 'close_rate',
+BT_DATA_COLUMNS = ['pair', 'stake_amount', 'max_stake_amount', 'amount',
+                   'open_date', 'close_date', 'open_rate', 'close_rate',
                    'fee_open', 'fee_close', 'trade_duration',
                    'profit_ratio', 'profit_abs', 'exit_reason',
                    'initial_stop_loss_abs', 'initial_stop_loss_ratio', 'stop_loss_abs',

@@ -241,6 +241,33 @@ def find_existing_backtest_stats(dirname: Union[Path, str], run_ids: Dict[str, s

     return results

+def _load_backtest_data_df_compatibility(df: pd.DataFrame) -> pd.DataFrame:
+    """
+    Compatibility support for older backtest data.
+    """
+    df['open_date'] = pd.to_datetime(df['open_date'],
+                                     utc=True,
+                                     infer_datetime_format=True
+                                     )
+    df['close_date'] = pd.to_datetime(df['close_date'],
+                                      utc=True,
+                                      infer_datetime_format=True
+                                      )
+    # Compatibility support for pre short Columns
+    if 'is_short' not in df.columns:
+        df['is_short'] = False
+    if 'leverage' not in df.columns:
+        df['leverage'] = 1.0
+    if 'enter_tag' not in df.columns:
+        df['enter_tag'] = df['buy_tag']
+        df = df.drop(['buy_tag'], axis=1)
+    if 'max_stake_amount' not in df.columns:
+        df['max_stake_amount'] = df['stake_amount']
+    if 'orders' not in df.columns:
+        df['orders'] = None
+    return df

 def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = None) -> pd.DataFrame:
     """
     Load backtest data file.

@@ -269,24 +296,7 @@ def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = Non

             data = data['strategy'][strategy]['trades']
             df = pd.DataFrame(data)
             if not df.empty:
-                df['open_date'] = pd.to_datetime(df['open_date'],
-                                                 utc=True,
-                                                 infer_datetime_format=True
-                                                 )
-                df['close_date'] = pd.to_datetime(df['close_date'],
-                                                  utc=True,
-                                                  infer_datetime_format=True
-                                                  )
-                # Compatibility support for pre short Columns
-                if 'is_short' not in df.columns:
-                    df['is_short'] = 0
-                if 'leverage' not in df.columns:
-                    df['leverage'] = 1.0
-                if 'enter_tag' not in df.columns:
-                    df['enter_tag'] = df['buy_tag']
-                    df = df.drop(['buy_tag'], axis=1)
-                if 'orders' not in df.columns:
-                    df['orders'] = None
+                df = _load_backtest_data_df_compatibility(df)

         else:
             # old format - only with lists.

@@ -9,14 +9,16 @@ from collections import deque

 from datetime import datetime, timezone
 from typing import Any, Dict, List, Optional, Tuple

-from pandas import DataFrame
+from pandas import DataFrame, to_timedelta

 from freqtrade.configuration import TimeRange
-from freqtrade.constants import Config, ListPairsWithTimeframes, PairWithTimeframe
+from freqtrade.constants import (FULL_DATAFRAME_THRESHOLD, Config, ListPairsWithTimeframes,
+                                 PairWithTimeframe)
 from freqtrade.data.history import load_pair_history
 from freqtrade.enums import CandleType, RPCMessageType, RunMode
 from freqtrade.exceptions import ExchangeError, OperationalException
 from freqtrade.exchange import Exchange, timeframe_to_seconds
+from freqtrade.misc import append_candles_to_dataframe
 from freqtrade.rpc import RPCManager
 from freqtrade.util import PeriodicCache

@@ -104,13 +106,15 @@ class DataProvider:

     def _emit_df(
         self,
         pair_key: PairWithTimeframe,
-        dataframe: DataFrame
+        dataframe: DataFrame,
+        new_candle: bool
     ) -> None:
         """
         Send this dataframe as an ANALYZED_DF message to RPC

         :param pair_key: PairWithTimeframe tuple
-        :param data: Tuple containing the DataFrame and the datetime it was cached
+        :param dataframe: Dataframe to emit
+        :param new_candle: This is a new candle
         """
         if self.__rpc:
             self.__rpc.send_msg(

@@ -118,13 +122,18 @@ class DataProvider:

                 'type': RPCMessageType.ANALYZED_DF,
                 'data': {
                     'key': pair_key,
-                    'df': dataframe,
+                    'df': dataframe.tail(1),
                     'la': datetime.now(timezone.utc)
                 }
             }
         )
+        if new_candle:
+            self.__rpc.send_msg({
+                'type': RPCMessageType.NEW_CANDLE,
+                'data': pair_key,
+            })

-    def _add_external_df(
+    def _replace_external_df(
         self,
         pair: str,
         dataframe: DataFrame,

@@ -150,6 +159,85 @@ class DataProvider:

         self.__producer_pairs_df[producer_name][pair_key] = (dataframe, _last_analyzed)
         logger.debug(f"External DataFrame for {pair_key} from {producer_name} added.")

+    def _add_external_df(
+        self,
+        pair: str,
+        dataframe: DataFrame,
+        last_analyzed: datetime,
+        timeframe: str,
+        candle_type: CandleType,
+        producer_name: str = "default"
+    ) -> Tuple[bool, int]:
+        """
+        Append a candle to the existing external dataframe. The incoming dataframe
+        must have at least 1 candle.
+
+        :param pair: pair to get the data for
+        :param timeframe: Timeframe to get data for
+        :param candle_type: Any of the enum CandleType (must match trading mode!)
+        :returns: False if the candle could not be appended, or the int number of missing candles.
+        """
+        pair_key = (pair, timeframe, candle_type)
+
+        if dataframe.empty:
+            # The incoming dataframe must have at least 1 candle
+            return (False, 0)
+
+        if len(dataframe) >= FULL_DATAFRAME_THRESHOLD:
+            # This is likely a full dataframe
+            # Add the dataframe to the dataprovider
+            self._replace_external_df(
+                pair,
+                dataframe,
+                last_analyzed=last_analyzed,
+                timeframe=timeframe,
+                candle_type=candle_type,
+                producer_name=producer_name
+            )
+            return (True, 0)
+
+        if (producer_name not in self.__producer_pairs_df
+           or pair_key not in self.__producer_pairs_df[producer_name]):
+            # We don't have data from this producer yet,
+            # or we don't have data for this pair_key
+            # return False and 1000 for the full df
+            return (False, 1000)
+
+        existing_df, _ = self.__producer_pairs_df[producer_name][pair_key]
+
+        # CHECK FOR MISSING CANDLES
+        timeframe_delta = to_timedelta(timeframe)  # Convert the timeframe to a timedelta for pandas
+        local_last = existing_df.iloc[-1]['date']  # We want the last date from our copy
+        incoming_first = dataframe.iloc[0]['date']  # We want the first date from the incoming
+
+        # Remove existing candles that are newer than the incoming first candle
+        existing_df1 = existing_df[existing_df['date'] < incoming_first]
+
+        candle_difference = (incoming_first - local_last) / timeframe_delta
+
+        # If the difference divided by the timeframe is 1, then this
+        # is the candle we want and the incoming data isn't missing any.
+        # If the candle_difference is more than 1, that means
+        # we missed some candles between our data and the incoming
+        # so return False and candle_difference.
+        if candle_difference > 1:
+            return (False, candle_difference)
+        if existing_df1.empty:
+            appended_df = dataframe
+        else:
+            appended_df = append_candles_to_dataframe(existing_df1, dataframe)
+
+        # Everything is good, we appended
+        self._replace_external_df(
+            pair,
+            appended_df,
+            last_analyzed=last_analyzed,
+            timeframe=timeframe,
+            candle_type=candle_type,
+            producer_name=producer_name
+        )
+        return (True, 0)

     def get_producer_df(
         self,
         pair: str,

@@ -1,11 +1,12 @@

 import logging
 from pathlib import Path
 from typing import List, Optional

 import joblib
 import pandas as pd
 from tabulate import tabulate

+from freqtrade.configuration import TimeRange
 from freqtrade.constants import Config
 from freqtrade.data.btanalysis import (get_latest_backtest_filename, load_backtest_data,
                                        load_backtest_stats)
 from freqtrade.exceptions import OperationalException

@@ -152,37 +153,55 @@ def _do_group_table_output(bigdf, glist):

         logger.warning("Invalid group mask specified.")

-def _print_results(analysed_trades, stratname, analysis_groups,
-                   enter_reason_list, exit_reason_list,
-                   indicator_list, columns=None):
-    if columns is None:
-        columns = ['pair', 'open_date', 'close_date', 'profit_abs', 'enter_reason', 'exit_reason']
+def _select_rows_within_dates(df, timerange=None, df_date_col: str = 'date'):
+    if timerange:
+        if timerange.starttype == 'date':
+            df = df.loc[(df[df_date_col] >= timerange.startdt)]
+        if timerange.stoptype == 'date':
+            df = df.loc[(df[df_date_col] < timerange.stopdt)]
+    return df

-    bigdf = pd.DataFrame()

+def _select_rows_by_tags(df, enter_reason_list, exit_reason_list):
+    if enter_reason_list and "all" not in enter_reason_list:
+        df = df.loc[(df['enter_reason'].isin(enter_reason_list))]
+
+    if exit_reason_list and "all" not in exit_reason_list:
+        df = df.loc[(df['exit_reason'].isin(exit_reason_list))]
+    return df

+def prepare_results(analysed_trades, stratname,
+                    enter_reason_list, exit_reason_list,
+                    timerange=None):
+    res_df = pd.DataFrame()
     for pair, trades in analysed_trades[stratname].items():
-        bigdf = pd.concat([bigdf, trades], ignore_index=True)
+        res_df = pd.concat([res_df, trades], ignore_index=True)

-    if bigdf.shape[0] > 0 and ('enter_reason' in bigdf.columns):
+    res_df = _select_rows_within_dates(res_df, timerange)
+
+    if res_df is not None and res_df.shape[0] > 0 and ('enter_reason' in res_df.columns):
+        res_df = _select_rows_by_tags(res_df, enter_reason_list, exit_reason_list)
+
+    return res_df
+
+def print_results(res_df, analysis_groups, indicator_list):
+    if res_df.shape[0] > 0:
         if analysis_groups:
-            _do_group_table_output(bigdf, analysis_groups)
-
-        if enter_reason_list and "all" not in enter_reason_list:
-            bigdf = bigdf.loc[(bigdf['enter_reason'].isin(enter_reason_list))]
-
-        if exit_reason_list and "all" not in exit_reason_list:
-            bigdf = bigdf.loc[(bigdf['exit_reason'].isin(exit_reason_list))]
+            _do_group_table_output(res_df, analysis_groups)

         if "all" in indicator_list:
-            print(bigdf)
+            print(res_df)
         elif indicator_list is not None:
             available_inds = []
             for ind in indicator_list:
-                if ind in bigdf:
+                if ind in res_df:
                     available_inds.append(ind)
             ilist = ["pair", "enter_reason", "exit_reason"] + available_inds
-            _print_table(bigdf[ilist], sortcols=['exit_reason'], show_index=False)
+            _print_table(res_df[ilist], sortcols=['exit_reason'], show_index=False)
     else:
-        print("\\_ No trades to show")
+        print("\\No trades to show")

 def _print_table(df, sortcols=None, show_index=False):

@@ -201,27 +220,34 @@ def _print_table(df, sortcols=None, show_index=False):
    )

def process_entry_exit_reasons(backtest_dir: Path,
                               pairlist: List[str],
                               analysis_groups: Optional[List[str]] = ["0", "1", "2"],
                               enter_reason_list: Optional[List[str]] = ["all"],
                               exit_reason_list: Optional[List[str]] = ["all"],
                               indicator_list: Optional[List[str]] = []):
def process_entry_exit_reasons(config: Config):
    try:
        backtest_stats = load_backtest_stats(backtest_dir)
        analysis_groups = config.get('analysis_groups', [])
        enter_reason_list = config.get('enter_reason_list', ["all"])
        exit_reason_list = config.get('exit_reason_list', ["all"])
        indicator_list = config.get('indicator_list', [])

        timerange = TimeRange.parse_timerange(None if config.get(
            'timerange') is None else str(config.get('timerange')))

        backtest_stats = load_backtest_stats(config['exportfilename'])

        for strategy_name, results in backtest_stats['strategy'].items():
            trades = load_backtest_data(backtest_dir, strategy_name)
            trades = load_backtest_data(config['exportfilename'], strategy_name)

            if not trades.empty:
                signal_candles = _load_signal_candles(backtest_dir)
                analysed_trades_dict = _process_candles_and_indicators(pairlist, strategy_name,
                                                                       trades, signal_candles)
                _print_results(analysed_trades_dict,
                               strategy_name,
                               analysis_groups,
                               enter_reason_list,
                               exit_reason_list,
                               indicator_list)
                signal_candles = _load_signal_candles(config['exportfilename'])
                analysed_trades_dict = _process_candles_and_indicators(
                    config['exchange']['pair_whitelist'], strategy_name,
                    trades, signal_candles)

                res_df = prepare_results(analysed_trades_dict, strategy_name,
                                         enter_reason_list, exit_reason_list,
                                         timerange=timerange)

                print_results(res_df,
                              analysis_groups,
                              indicator_list)

    except ValueError as e:
        raise OperationalException(e) from e
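
The refactor above moves every analysis option out of the function signature and into the bot configuration. A minimal sketch of how a caller might drive the new entry point; the config keys mirror the config.get() calls above, while the concrete values (and the module path in the import) are illustrative assumptions:

from freqtrade.data.entryexitanalysis import process_entry_exit_reasons

config = {
    'exportfilename': 'user_data/backtest_results',  # consumed by load_backtest_stats()
    'exchange': {'pair_whitelist': ['BTC/USDT']},    # pairs passed to the candle processor
    'analysis_groups': ['0', '1'],
    'enter_reason_list': ['all'],
    'exit_reason_list': ['all'],
    'indicator_list': ['rsi'],
    'timerange': '20220101-20220201',
}
process_entry_exit_reasons(config)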

@@ -6,7 +6,7 @@ from freqtrade.enums.exittype import ExitType
from freqtrade.enums.hyperoptstate import HyperoptState
from freqtrade.enums.marginmode import MarginMode
from freqtrade.enums.ordertypevalue import OrderTypeValues
from freqtrade.enums.rpcmessagetype import RPCMessageType, RPCRequestType
from freqtrade.enums.rpcmessagetype import NO_ECHO_MESSAGES, RPCMessageType, RPCRequestType
from freqtrade.enums.runmode import NON_UTIL_MODES, OPTIMIZE_MODES, TRADING_MODES, RunMode
from freqtrade.enums.signaltype import SignalDirection, SignalTagType, SignalType
from freqtrade.enums.state import State

@@ -21,6 +21,7 @@ class RPCMessageType(str, Enum):

    WHITELIST = 'whitelist'
    ANALYZED_DF = 'analyzed_df'
    NEW_CANDLE = 'new_candle'

    def __repr__(self):
        return self.value

@@ -35,3 +36,6 @@ class RPCRequestType(str, Enum):

    WHITELIST = 'whitelist'
    ANALYZED_DF = 'analyzed_df'


NO_ECHO_MESSAGES = (RPCMessageType.ANALYZED_DF, RPCMessageType.WHITELIST, RPCMessageType.NEW_CANDLE)
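
NO_ECHO_MESSAGES groups the high-volume message types (analyzed dataframes, whitelist refreshes, new candles) so consumers can skip echoing them. A sketch of the kind of filter this enables; the should_echo helper is hypothetical, not freqtrade API:

from freqtrade.enums import NO_ECHO_MESSAGES, RPCMessageType

def should_echo(msg_type: RPCMessageType) -> bool:
    # Suppress chatty dataframe/whitelist/candle updates in a console sink.
    return msg_type not in NO_ECHO_MESSAGES

assert should_echo(RPCMessageType.NEW_CANDLE) is False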

@@ -3,7 +3,6 @@
from freqtrade.exchange.common import remove_credentials, MAP_EXCHANGE_CHILDCLASS
from freqtrade.exchange.exchange import Exchange
# isort: on
from freqtrade.exchange.bibox import Bibox
from freqtrade.exchange.binance import Binance
from freqtrade.exchange.bitpanda import Bitpanda
from freqtrade.exchange.bittrex import Bittrex

@@ -1,28 +0,0 @@
""" Bibox exchange subclass """
import logging
from typing import Dict

from freqtrade.exchange import Exchange


logger = logging.getLogger(__name__)


class Bibox(Exchange):
    """
    Bibox exchange class. Contains adjustments needed for Freqtrade to work
    with this exchange.

    Please note that this exchange is not included in the list of exchanges
    officially supported by the Freqtrade development team. So some features
    may still not work as expected.
    """

    # fetchCurrencies API point requires authentication for Bibox,
    # so switch it off for Freqtrade load_markets()
    @property
    def _ccxt_config(self) -> Dict:
        # Parameters to add directly to ccxt sync/async initialization.
        config = {"has": {"fetchCurrencies": False}}
        config.update(super()._ccxt_config)
        return config

@@ -31,7 +31,7 @@ class Binance(Exchange):
        "ccxt_futures_name": "future"
    }
    _ft_has_futures: Dict = {
        "stoploss_order_types": {"limit": "limit", "market": "market"},
        "stoploss_order_types": {"limit": "stop", "market": "stop_market"},
        "tickers_have_price": False,
    }
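
The futures mapping above stops reusing plain limit/market orders for stoplosses and switches to Binance's dedicated STOP and STOP_MARKET futures order types. An illustrative lookup showing the translation the mapping performs; the surrounding exchange plumbing is not shown:

stoploss_order_types = {"limit": "stop", "market": "stop_market"}
order_type = stoploss_order_types["limit"]  # -> "stop" on Binance futures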

125
freqtrade/freqai/RL/Base3ActionRLEnv.py
Normal file
@@ -0,0 +1,125 @@
import logging
from enum import Enum

from gym import spaces

from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions


logger = logging.getLogger(__name__)


class Actions(Enum):
    Neutral = 0
    Buy = 1
    Sell = 2


class Base3ActionRLEnv(BaseEnvironment):
    """
    Base class for a 3 action environment
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.actions = Actions

    def set_action_space(self):
        self.action_space = spaces.Discrete(len(Actions))

    def step(self, action: int):
        """
        Logic for a single step (incrementing one candle in time)
        by the agent
        :param: action: int = the action type that the agent plans
            to take for the current step.
        :returns:
            observation = current state of environment
            step_reward = the reward from `calculate_reward()`
            _done = if the agent "died" or if the candles finished
            info = dict passed back to openai gym lib
        """
        self._done = False
        self._current_tick += 1

        if self._current_tick == self._end_tick:
            self._done = True

        self._update_unrealized_total_profit()
        step_reward = self.calculate_reward(action)
        self.total_reward += step_reward
        self.tensorboard_log(self.actions._member_names_[action])

        trade_type = None
        if self.is_tradesignal(action):
            if action == Actions.Buy.value:
                if self._position == Positions.Short:
                    self._update_total_profit()
                self._position = Positions.Long
                trade_type = "long"
                self._last_trade_tick = self._current_tick
            elif action == Actions.Sell.value and self.can_short:
                if self._position == Positions.Long:
                    self._update_total_profit()
                self._position = Positions.Short
                trade_type = "short"
                self._last_trade_tick = self._current_tick
            elif action == Actions.Sell.value and not self.can_short:
                self._update_total_profit()
                self._position = Positions.Neutral
                trade_type = "neutral"
                self._last_trade_tick = None
            else:
                print("case not defined")

            if trade_type is not None:
                self.trade_history.append(
                    {'price': self.current_price(), 'index': self._current_tick,
                     'type': trade_type})

        if (self._total_profit < self.max_drawdown or
                self._total_unrealized_profit < self.max_drawdown):
            self._done = True

        self._position_history.append(self._position)

        info = dict(
            tick=self._current_tick,
            action=action,
            total_reward=self.total_reward,
            total_profit=self._total_profit,
            position=self._position.value,
            trade_duration=self.get_trade_duration(),
            current_profit_pct=self.get_unrealized_profit()
        )

        observation = self._get_observation()

        self._update_history(info)

        return observation, step_reward, self._done, info

    def is_tradesignal(self, action: int) -> bool:
        """
        Determine if the signal is a trade signal
        e.g.: agent wants an Actions.Buy while it is in a Positions.Short
        """
        return (
            (action == Actions.Buy.value and self._position == Positions.Neutral)
            or (action == Actions.Sell.value and self._position == Positions.Long)
            or (action == Actions.Sell.value and self._position == Positions.Neutral
                and self.can_short)
            or (action == Actions.Buy.value and self._position == Positions.Short
                and self.can_short)
        )

    def _is_valid(self, action: int) -> bool:
        """
        Determine if the signal is valid.
        e.g.: agent wants an Actions.Sell while it is in a Positions.Long
        """
        if self.can_short:
            return action in [Actions.Buy.value, Actions.Sell.value, Actions.Neutral.value]
        else:
            if action == Actions.Sell.value and self._position != Positions.Long:
                return False
            return True

142
freqtrade/freqai/RL/Base4ActionRLEnv.py
Normal file
@@ -0,0 +1,142 @@
import logging
from enum import Enum

from gym import spaces

from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions


logger = logging.getLogger(__name__)


class Actions(Enum):
    Neutral = 0
    Exit = 1
    Long_enter = 2
    Short_enter = 3


class Base4ActionRLEnv(BaseEnvironment):
    """
    Base class for a 4 action environment
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.actions = Actions

    def set_action_space(self):
        self.action_space = spaces.Discrete(len(Actions))

    def step(self, action: int):
        """
        Logic for a single step (incrementing one candle in time)
        by the agent
        :param: action: int = the action type that the agent plans
            to take for the current step.
        :returns:
            observation = current state of environment
            step_reward = the reward from `calculate_reward()`
            _done = if the agent "died" or if the candles finished
            info = dict passed back to openai gym lib
        """
        self._done = False
        self._current_tick += 1

        if self._current_tick == self._end_tick:
            self._done = True

        self._update_unrealized_total_profit()
        step_reward = self.calculate_reward(action)
        self.total_reward += step_reward
        self.tensorboard_log(self.actions._member_names_[action])

        trade_type = None
        if self.is_tradesignal(action):
            """
            Action: Neutral, position: Long -> Close Long
            Action: Neutral, position: Short -> Close Short

            Action: Long, position: Neutral -> Open Long
            Action: Long, position: Short -> Close Short and Open Long

            Action: Short, position: Neutral -> Open Short
            Action: Short, position: Long -> Close Long and Open Short
            """

            if action == Actions.Neutral.value:
                self._position = Positions.Neutral
                trade_type = "neutral"
                self._last_trade_tick = None
            elif action == Actions.Long_enter.value:
                self._position = Positions.Long
                trade_type = "long"
                self._last_trade_tick = self._current_tick
            elif action == Actions.Short_enter.value:
                self._position = Positions.Short
                trade_type = "short"
                self._last_trade_tick = self._current_tick
            elif action == Actions.Exit.value:
                self._update_total_profit()
                self._position = Positions.Neutral
                trade_type = "neutral"
                self._last_trade_tick = None
            else:
                print("case not defined")

            if trade_type is not None:
                self.trade_history.append(
                    {'price': self.current_price(), 'index': self._current_tick,
                     'type': trade_type})

        if (self._total_profit < self.max_drawdown or
                self._total_unrealized_profit < self.max_drawdown):
            self._done = True

        self._position_history.append(self._position)

        info = dict(
            tick=self._current_tick,
            action=action,
            total_reward=self.total_reward,
            total_profit=self._total_profit,
            position=self._position.value,
            trade_duration=self.get_trade_duration(),
            current_profit_pct=self.get_unrealized_profit()
        )

        observation = self._get_observation()

        self._update_history(info)

        return observation, step_reward, self._done, info

    def is_tradesignal(self, action: int) -> bool:
        """
        Determine if the signal is a trade signal
        e.g.: agent wants an Actions.Long_exit while it is in a Positions.Short
        """
        return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or
                    (action == Actions.Neutral.value and self._position == Positions.Short) or
                    (action == Actions.Neutral.value and self._position == Positions.Long) or
                    (action == Actions.Short_enter.value and self._position == Positions.Short) or
                    (action == Actions.Short_enter.value and self._position == Positions.Long) or
                    (action == Actions.Exit.value and self._position == Positions.Neutral) or
                    (action == Actions.Long_enter.value and self._position == Positions.Long) or
                    (action == Actions.Long_enter.value and self._position == Positions.Short))

    def _is_valid(self, action: int) -> bool:
        """
        Determine if the signal is valid.
        e.g.: agent wants an Actions.Long_exit while it is in a Positions.Short
        """
        # Agent should only try to exit if it is in position
        if action == Actions.Exit.value:
            if self._position not in (Positions.Short, Positions.Long):
                return False

        # Agent should only try to enter if it is not in position
        if action in (Actions.Short_enter.value, Actions.Long_enter.value):
            if self._position != Positions.Neutral:
                return False

        return True

152
freqtrade/freqai/RL/Base5ActionRLEnv.py
Normal file
@@ -0,0 +1,152 @@
import logging
from enum import Enum

from gym import spaces

from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions


logger = logging.getLogger(__name__)


class Actions(Enum):
    Neutral = 0
    Long_enter = 1
    Long_exit = 2
    Short_enter = 3
    Short_exit = 4


class Base5ActionRLEnv(BaseEnvironment):
    """
    Base class for a 5 action environment
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.actions = Actions

    def set_action_space(self):
        self.action_space = spaces.Discrete(len(Actions))

    def step(self, action: int):
        """
        Logic for a single step (incrementing one candle in time)
        by the agent
        :param: action: int = the action type that the agent plans
            to take for the current step.
        :returns:
            observation = current state of environment
            step_reward = the reward from `calculate_reward()`
            _done = if the agent "died" or if the candles finished
            info = dict passed back to openai gym lib
        """
        self._done = False
        self._current_tick += 1

        if self._current_tick == self._end_tick:
            self._done = True

        self._update_unrealized_total_profit()
        step_reward = self.calculate_reward(action)
        self.total_reward += step_reward
        self.tensorboard_log(self.actions._member_names_[action])

        trade_type = None
        if self.is_tradesignal(action):
            """
            Action: Neutral, position: Long -> Close Long
            Action: Neutral, position: Short -> Close Short

            Action: Long, position: Neutral -> Open Long
            Action: Long, position: Short -> Close Short and Open Long

            Action: Short, position: Neutral -> Open Short
            Action: Short, position: Long -> Close Long and Open Short
            """

            if action == Actions.Neutral.value:
                self._position = Positions.Neutral
                trade_type = "neutral"
                self._last_trade_tick = None
            elif action == Actions.Long_enter.value:
                self._position = Positions.Long
                trade_type = "long"
                self._last_trade_tick = self._current_tick
            elif action == Actions.Short_enter.value:
                self._position = Positions.Short
                trade_type = "short"
                self._last_trade_tick = self._current_tick
            elif action == Actions.Long_exit.value:
                self._update_total_profit()
                self._position = Positions.Neutral
                trade_type = "neutral"
                self._last_trade_tick = None
            elif action == Actions.Short_exit.value:
                self._update_total_profit()
                self._position = Positions.Neutral
                trade_type = "neutral"
                self._last_trade_tick = None
            else:
                print("case not defined")

            if trade_type is not None:
                self.trade_history.append(
                    {'price': self.current_price(), 'index': self._current_tick,
                     'type': trade_type})

        if (self._total_profit < self.max_drawdown or
                self._total_unrealized_profit < self.max_drawdown):
            self._done = True

        self._position_history.append(self._position)

        info = dict(
            tick=self._current_tick,
            action=action,
            total_reward=self.total_reward,
            total_profit=self._total_profit,
            position=self._position.value,
            trade_duration=self.get_trade_duration(),
            current_profit_pct=self.get_unrealized_profit()
        )

        observation = self._get_observation()

        self._update_history(info)

        return observation, step_reward, self._done, info

    def is_tradesignal(self, action: int) -> bool:
        """
        Determine if the signal is a trade signal
        e.g.: agent wants an Actions.Long_exit while it is in a Positions.Short
        """
        return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or
                    (action == Actions.Neutral.value and self._position == Positions.Short) or
                    (action == Actions.Neutral.value and self._position == Positions.Long) or
                    (action == Actions.Short_enter.value and self._position == Positions.Short) or
                    (action == Actions.Short_enter.value and self._position == Positions.Long) or
                    (action == Actions.Short_exit.value and self._position == Positions.Long) or
                    (action == Actions.Short_exit.value and self._position == Positions.Neutral) or
                    (action == Actions.Long_enter.value and self._position == Positions.Long) or
                    (action == Actions.Long_enter.value and self._position == Positions.Short) or
                    (action == Actions.Long_exit.value and self._position == Positions.Short) or
                    (action == Actions.Long_exit.value and self._position == Positions.Neutral))

    def _is_valid(self, action: int) -> bool:
        """
        Determine if the signal is valid.
        e.g.: agent wants an Actions.Long_exit while it is in a Positions.Short
        """
        # Agent should only try to exit if it is in position
        if action in (Actions.Short_exit.value, Actions.Long_exit.value):
            if self._position not in (Positions.Short, Positions.Long):
                return False

        # Agent should only try to enter if it is not in position
        if action in (Actions.Short_enter.value, Actions.Long_enter.value):
            if self._position != Positions.Neutral:
                return False

        return True
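
All three environments implement the classic gym step contract documented above. A minimal interaction loop under stated assumptions; MyEnv is a hypothetical concrete subclass of one of the base environments, and features_df, prices_df and config are assumed to be prepared elsewhere:

env = MyEnv(df=features_df, prices=prices_df, window_size=10, config=config)
obs = env.reset()
done = False
while not done:
    action = env.action_space.sample()          # random agent, purely illustrative
    obs, reward, done, info = env.step(action)  # the four-tuple documented above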

363
freqtrade/freqai/RL/BaseEnvironment.py
Normal file
@@ -0,0 +1,363 @@
import logging
import random
from abc import abstractmethod
from enum import Enum
from typing import Optional, Type, Union

import gym
import numpy as np
import pandas as pd
from gym import spaces
from gym.utils import seeding
from pandas import DataFrame


logger = logging.getLogger(__name__)


class BaseActions(Enum):
    """
    Default action space, mostly used for type handling.
    """
    Neutral = 0
    Long_enter = 1
    Long_exit = 2
    Short_enter = 3
    Short_exit = 4


class Positions(Enum):
    Short = 0
    Long = 1
    Neutral = 0.5

    def opposite(self):
        return Positions.Short if self == Positions.Long else Positions.Long


class BaseEnvironment(gym.Env):
    """
    Base class for environments. This class is agnostic to action count.
    Inherited classes customize this to include varying action counts/types,
    See RL/Base5ActionRLEnv.py and RL/Base4ActionRLEnv.py
    """

    def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(),
                 reward_kwargs: dict = {}, window_size=10, starting_point=True,
                 id: str = 'baseenv-1', seed: int = 1, config: dict = {}, live: bool = False,
                 fee: float = 0.0015, can_short: bool = False):
        """
        Initializes the training/eval environment.
        :param df: dataframe of features
        :param prices: dataframe of prices to be used in the training environment
        :param window_size: size of window (temporal) to pass to the agent
        :param reward_kwargs: extra config settings assigned by user in `rl_config`
        :param starting_point: start at edge of window or not
        :param id: string id of the environment (used in backend for multiprocessed env)
        :param seed: Sets the seed of the environment higher in the gym.Env object
        :param config: Typical user configuration file
        :param live: Whether or not this environment is active in dry/live/backtesting
        :param fee: The fee to use for environmental interactions.
        :param can_short: Whether or not the environment can short
        """
        self.config = config
        self.rl_config = config['freqai']['rl_config']
        self.add_state_info = self.rl_config.get('add_state_info', False)
        self.id = id
        self.max_drawdown = 1 - self.rl_config.get('max_training_drawdown_pct', 0.8)
        self.compound_trades = config['stake_amount'] == 'unlimited'
        if self.config.get('fee', None) is not None:
            self.fee = self.config['fee']
        else:
            self.fee = fee

        # set here to default 5Ac, but all children envs can override this
        self.actions: Type[Enum] = BaseActions
        self.tensorboard_metrics: dict = {}
        self.can_short = can_short
        self.live = live
        if not self.live and self.add_state_info:
            self.add_state_info = False
            logger.warning("add_state_info is not available in backtesting. Deactivating.")
        self.seed(seed)
        self.reset_env(df, prices, window_size, reward_kwargs, starting_point)

    def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int,
                  reward_kwargs: dict, starting_point=True):
        """
        Resets the environment when the agent fails (in our case, if the drawdown
        exceeds the user set max_training_drawdown_pct)
        :param df: dataframe of features
        :param prices: dataframe of prices to be used in the training environment
        :param window_size: size of window (temporal) to pass to the agent
        :param reward_kwargs: extra config settings assigned by user in `rl_config`
        :param starting_point: start at edge of window or not
        """
        self.df = df
        self.signal_features = self.df
        self.prices = prices
        self.window_size = window_size
        self.starting_point = starting_point
        self.rr = reward_kwargs["rr"]
        self.profit_aim = reward_kwargs["profit_aim"]

        # # spaces
        if self.add_state_info:
            self.total_features = self.signal_features.shape[1] + 3
        else:
            self.total_features = self.signal_features.shape[1]
        self.shape = (window_size, self.total_features)
        self.set_action_space()
        self.observation_space = spaces.Box(
            low=-1, high=1, shape=self.shape, dtype=np.float32)

        # episode
        self._start_tick: int = self.window_size
        self._end_tick: int = len(self.prices) - 1
        self._done: bool = False
        self._current_tick: int = self._start_tick
        self._last_trade_tick: Optional[int] = None
        self._position = Positions.Neutral
        self._position_history: list = [None]
        self.total_reward: float = 0
        self._total_profit: float = 1
        self._total_unrealized_profit: float = 1
        self.history: dict = {}
        self.trade_history: list = []

    @abstractmethod
    def set_action_space(self):
        """
        Unique to the environment action count. Must be inherited.
        """

    def seed(self, seed: int = 1):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def tensorboard_log(self, metric: str, value: Union[int, float] = 1, inc: bool = True):
        """
        Function builds the tensorboard_metrics dictionary
        to be parsed by the TensorboardCallback. This
        function is designed for tracking incremented objects,
        events, actions inside the training environment.
        For example, a user can call this to track the
        frequency of occurrence of an `is_valid` call in
        their `calculate_reward()`:

        def calculate_reward(self, action: int) -> float:
            if not self._is_valid(action):
                self.tensorboard_log("is_valid")
                return -2

        :param metric: metric to be tracked and incremented
        :param value: value to increment `metric` by
        :param inc: sets whether the `value` is incremented or not
        """
        if not inc or metric not in self.tensorboard_metrics:
            self.tensorboard_metrics[metric] = value
        else:
            self.tensorboard_metrics[metric] += value

    def reset_tensorboard_log(self):
        self.tensorboard_metrics = {}

    def reset(self):
        """
        Reset is called at the beginning of every episode
        """
        self.reset_tensorboard_log()

        self._done = False

        if self.starting_point is True:
            if self.rl_config.get('randomize_starting_position', False):
                length_of_data = int(self._end_tick / 4)
                start_tick = random.randint(self.window_size + 1, length_of_data)
                self._start_tick = start_tick
            self._position_history = (self._start_tick * [None]) + [self._position]
        else:
            self._position_history = (self.window_size * [None]) + [self._position]

        self._current_tick = self._start_tick
        self._last_trade_tick = None
        self._position = Positions.Neutral

        self.total_reward = 0.
        self._total_profit = 1.  # unit
        self.history = {}
        self.trade_history = []
        self.portfolio_log_returns = np.zeros(len(self.prices))

        self._profits = [(self._start_tick, 1)]
        self.close_trade_profit = []
        self._total_unrealized_profit = 1

        return self._get_observation()

    @abstractmethod
    def step(self, action: int):
        """
        Step depends on action types, this must be inherited.
        """
        return

    def _get_observation(self):
        """
        This may or may not be independent of action types, user can inherit
        this in their custom "MyRLEnv"
        """
        features_window = self.signal_features[(
            self._current_tick - self.window_size):self._current_tick]
        if self.add_state_info:
            features_and_state = DataFrame(np.zeros((len(features_window), 3)),
                                           columns=['current_profit_pct',
                                                    'position',
                                                    'trade_duration'],
                                           index=features_window.index)

            features_and_state['current_profit_pct'] = self.get_unrealized_profit()
            features_and_state['position'] = self._position.value
            features_and_state['trade_duration'] = self.get_trade_duration()
            features_and_state = pd.concat([features_window, features_and_state], axis=1)
            return features_and_state
        else:
            return features_window

    def get_trade_duration(self):
        """
        Get the trade duration if the agent is in a trade
        """
        if self._last_trade_tick is None:
            return 0
        else:
            return self._current_tick - self._last_trade_tick

    def get_unrealized_profit(self):
        """
        Get the unrealized profit if the agent is in a trade
        """
        if self._last_trade_tick is None:
            return 0.

        if self._position == Positions.Neutral:
            return 0.
        elif self._position == Positions.Short:
            current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open)
            last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open)
            return (last_trade_price - current_price) / last_trade_price
        elif self._position == Positions.Long:
            current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open)
            last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open)
            return (current_price - last_trade_price) / last_trade_price
        else:
            return 0.
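
A worked example of the Short branch above, using the default fee of 0.0015:

fee = 0.0015
entry = 100 / (1 + fee)      # add_exit_fee() on the short's entry open: ~99.85
now = 95 * (1 + fee)         # add_entry_fee() on the current open: ~95.14
pnl = (entry - now) / entry  # ~0.047, i.e. roughly +4.7 % unrealized profit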
    @abstractmethod
    def is_tradesignal(self, action: int) -> bool:
        """
        Determine if the signal is a trade signal. This is
        unique to the actions in the environment, and therefore must be
        inherited.
        """
        return True

    def _is_valid(self, action: int) -> bool:
        """
        Determine if the signal is valid. This is
        unique to the actions in the environment, and therefore must be
        inherited.
        """
        return True

    def add_entry_fee(self, price):
        return price * (1 + self.fee)

    def add_exit_fee(self, price):
        return price / (1 + self.fee)

    def _update_history(self, info):
        if not self.history:
            self.history = {key: [] for key in info.keys()}

        for key, value in info.items():
            self.history[key].append(value)

    @abstractmethod
    def calculate_reward(self, action: int) -> float:
        """
        An example reward function. This is the one function that users will likely
        wish to inject their own creativity into.
        :param action: int = The action made by the agent for the current candle.
        :return:
            float = the reward to give to the agent for current step (used for optimization
            of weights in NN)
        """

    def _update_unrealized_total_profit(self):
        """
        Update the unrealized total profit in case of episode end.
        """
        if self._position in (Positions.Long, Positions.Short):
            pnl = self.get_unrealized_profit()
            if self.compound_trades:
                # assumes unit stake and compounding
                unrl_profit = self._total_profit * (1 + pnl)
            else:
                # assumes unit stake and no compounding
                unrl_profit = self._total_profit + pnl
            self._total_unrealized_profit = unrl_profit

    def _update_total_profit(self):
        pnl = self.get_unrealized_profit()
        if self.compound_trades:
            # assumes unit stake and compounding
            self._total_profit = self._total_profit * (1 + pnl)
        else:
            # assumes unit stake and no compounding
            self._total_profit += pnl
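
The two accounting modes above coincide on the first trade but diverge afterwards; a quick arithmetic check:

total = 1.0
for pnl in (0.05, 0.05):
    total *= (1 + pnl)       # compounding ("stake_amount": "unlimited") -> 1.1025
flat = 1.0 + 0.05 + 0.05     # flat unit stake -> 1.10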
    def current_price(self) -> float:
        return self.prices.iloc[self._current_tick].open

    def get_actions(self) -> Type[Enum]:
        """
        Used by SubprocVecEnv to get actions from
        initialized env for tensorboard callback
        """
        return self.actions

    # Keeping around in case we want to start building more complex environment
    # templates in the future.
    # def most_recent_return(self):
    #     """
    #     Calculate the tick to tick return if in a trade.
    #     Return is generated from rising prices in Long
    #     and falling prices in Short positions.
    #     The actions Sell/Buy or Hold during a Long position trigger the sell/buy-fee.
    #     """
    #     # Long positions
    #     if self._position == Positions.Long:
    #         current_price = self.prices.iloc[self._current_tick].open
    #         previous_price = self.prices.iloc[self._current_tick - 1].open

    #         if (self._position_history[self._current_tick - 1] == Positions.Short
    #                 or self._position_history[self._current_tick - 1] == Positions.Neutral):
    #             previous_price = self.add_entry_fee(previous_price)

    #         return np.log(current_price) - np.log(previous_price)

    #     # Short positions
    #     if self._position == Positions.Short:
    #         current_price = self.prices.iloc[self._current_tick].open
    #         previous_price = self.prices.iloc[self._current_tick - 1].open
    #         if (self._position_history[self._current_tick - 1] == Positions.Long
    #                 or self._position_history[self._current_tick - 1] == Positions.Neutral):
    #             previous_price = self.add_exit_fee(previous_price)

    #         return np.log(previous_price) - np.log(current_price)

    #     return 0

    # def update_portfolio_log_returns(self, action):
    #     self.portfolio_log_returns[self._current_tick] = self.most_recent_return(action)

418
freqtrade/freqai/RL/BaseReinforcementLearningModel.py
Normal file
@@ -0,0 +1,418 @@
import importlib
import logging
from abc import abstractmethod
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Tuple, Type, Union

import gym
import numpy as np
import numpy.typing as npt
import pandas as pd
import torch as th
import torch.multiprocessing
from pandas import DataFrame
from stable_baselines3.common.callbacks import EvalCallback
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.utils import set_random_seed
from stable_baselines3.common.vec_env import SubprocVecEnv

from freqtrade.exceptions import OperationalException
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.freqai_interface import IFreqaiModel
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv
from freqtrade.freqai.RL.BaseEnvironment import BaseActions, Positions
from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback
from freqtrade.persistence import Trade


logger = logging.getLogger(__name__)

torch.multiprocessing.set_sharing_strategy('file_system')

SB3_MODELS = ['PPO', 'A2C', 'DQN']
SB3_CONTRIB_MODELS = ['TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO']


class BaseReinforcementLearningModel(IFreqaiModel):
    """
    User created Reinforcement Learning Model prediction class
    """

    def __init__(self, **kwargs) -> None:
        super().__init__(config=kwargs['config'])
        self.max_threads = min(self.freqai_info['rl_config'].get(
            'cpu_count', 1), max(int(self.max_system_threads / 2), 1))
        th.set_num_threads(self.max_threads)
        self.reward_params = self.freqai_info['rl_config']['model_reward_parameters']
        self.train_env: Union[SubprocVecEnv, Type[gym.Env]] = gym.Env()
        self.eval_env: Union[SubprocVecEnv, Type[gym.Env]] = gym.Env()
        self.eval_callback: Optional[EvalCallback] = None
        self.model_type = self.freqai_info['rl_config']['model_type']
        self.rl_config = self.freqai_info['rl_config']
        self.continual_learning = self.freqai_info.get('continual_learning', False)
        if self.model_type in SB3_MODELS:
            import_str = 'stable_baselines3'
        elif self.model_type in SB3_CONTRIB_MODELS:
            import_str = 'sb3_contrib'
        else:
            raise OperationalException(f'{self.model_type} not available in stable_baselines3 or '
                                       f'sb3_contrib. please choose one of {SB3_MODELS} or '
                                       f'{SB3_CONTRIB_MODELS}')

        mod = importlib.import_module(import_str, self.model_type)
        self.MODELCLASS = getattr(mod, self.model_type)
        self.policy_type = self.freqai_info['rl_config']['policy_type']
        self.unset_outlier_removal()
        self.net_arch = self.rl_config.get('net_arch', [128, 128])
        self.dd.model_type = import_str
        self.tensorboard_callback: TensorboardCallback = \
            TensorboardCallback(verbose=1, actions=BaseActions)

    def unset_outlier_removal(self):
        """
        If user has activated any function that may remove training points, this
        function will set them to false and warn them
        """
        if self.ft_params.get('use_SVM_to_remove_outliers', False):
            self.ft_params.update({'use_SVM_to_remove_outliers': False})
            logger.warning('User tried to use SVM with RL. Deactivating SVM.')
        if self.ft_params.get('use_DBSCAN_to_remove_outliers', False):
            self.ft_params.update({'use_DBSCAN_to_remove_outliers': False})
            logger.warning('User tried to use DBSCAN with RL. Deactivating DBSCAN.')
        if self.freqai_info['data_split_parameters'].get('shuffle', False):
            self.freqai_info['data_split_parameters'].update({'shuffle': False})
            logger.warning('User tried to shuffle training data. Setting shuffle to False')

    def train(
        self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
    ) -> Any:
        """
        Filter the training data and train a model to it. Train makes heavy use of the datakitchen
        for storing, saving, loading, and analyzing the data.
        :param unfiltered_df: Full dataframe for the current training period
        :param metadata: pair metadata from strategy.
        :returns:
        :model: Trained model which can be used to inference (self.predict)
        """

        logger.info("--------------------Starting training " f"{pair} --------------------")

        features_filtered, labels_filtered = dk.filter_features(
            unfiltered_df,
            dk.training_features_list,
            dk.label_list,
            training_filter=True,
        )

        data_dictionary: Dict[str, Any] = dk.make_train_test_datasets(
            features_filtered, labels_filtered)
        dk.fit_labels()  # FIXME useless for now, but just satiating append methods

        # normalize all data based on train_dataset only
        prices_train, prices_test = self.build_ohlc_price_dataframes(dk.data_dictionary, pair, dk)
        data_dictionary = dk.normalize_data(data_dictionary)

        # data cleaning/analysis
        self.data_cleaning_train(dk)

        logger.info(
            f'Training model on {len(dk.data_dictionary["train_features"].columns)}'
            f' features and {len(data_dictionary["train_features"])} data points'
        )

        self.set_train_and_eval_environments(data_dictionary, prices_train, prices_test, dk)

        model = self.fit(data_dictionary, dk)

        logger.info(f"--------------------done training {pair}--------------------")

        return model

    def set_train_and_eval_environments(self, data_dictionary: Dict[str, DataFrame],
                                        prices_train: DataFrame, prices_test: DataFrame,
                                        dk: FreqaiDataKitchen):
        """
        User can override this if they are using a custom MyRLEnv
        :param data_dictionary: dict = common data dictionary containing train and test
            features/labels/weights.
        :param prices_train/test: DataFrame = dataframe comprised of the prices to be used in the
            environment during training or testing
        :param dk: FreqaiDataKitchen = the datakitchen for the current pair
        """
        train_df = data_dictionary["train_features"]
        test_df = data_dictionary["test_features"]

        env_info = self.pack_env_dict()

        self.train_env = self.MyRLEnv(df=train_df,
                                      prices=prices_train,
                                      **env_info)
        self.eval_env = Monitor(self.MyRLEnv(df=test_df,
                                             prices=prices_test,
                                             **env_info))
        self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
                                          render=False, eval_freq=len(train_df),
                                          best_model_save_path=str(dk.data_path))

        actions = self.train_env.get_actions()
        self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions)

    def pack_env_dict(self) -> Dict[str, Any]:
        """
        Create dictionary of environment arguments
        """
        env_info = {"window_size": self.CONV_WIDTH,
                    "reward_kwargs": self.reward_params,
                    "config": self.config,
                    "live": self.live,
                    "can_short": self.can_short}
        if self.data_provider:
            env_info["fee"] = self.data_provider._exchange \
                .get_fee(symbol=self.data_provider.current_whitelist()[0])  # type: ignore

        return env_info
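
For reference, the rl_config block this class reads can be reconstructed from the keys used above. A sketch with illustrative values only, none of which are recommendations:

freqai_config = {
    "rl_config": {
        "model_type": "PPO",            # must appear in SB3_MODELS or SB3_CONTRIB_MODELS
        "policy_type": "MlpPolicy",
        "cpu_count": 2,
        "net_arch": [128, 128],
        "add_state_info": False,
        "max_trade_duration_candles": 300,
        "max_training_drawdown_pct": 0.8,
        "model_reward_parameters": {"rr": 1, "profit_aim": 0.025,
                                    "win_reward_factor": 2},
    }
}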
    @abstractmethod
    def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs):
        """
        Agent customizations and abstract Reinforcement Learning customizations
        go in here. Abstract method, so this function must be overridden by
        user class.
        """
        return

    def get_state_info(self, pair: str) -> Tuple[float, float, int]:
        """
        State info during dry/live (not backtesting) which is fed back
        into the model.
        :param pair: str = COIN/STAKE to get the environment information for
        :return:
        :market_side: float = representing short, long, or neutral for
            pair
        :current_profit: float = unrealized profit of the current trade
        :trade_duration: int = the number of candles that the trade has
            been open for
        """
        open_trades = Trade.get_trades_proxy(is_open=True)
        market_side = 0.5
        current_profit: float = 0
        trade_duration = 0
        for trade in open_trades:
            if trade.pair == pair:
                if self.data_provider._exchange is None:  # type: ignore
                    logger.error('No exchange available.')
                    return 0, 0, 0
                else:
                    current_rate = self.data_provider._exchange.get_rate(  # type: ignore
                        pair, refresh=False, side="exit", is_short=trade.is_short)

                now = datetime.now(timezone.utc).timestamp()
                trade_duration = int((now - trade.open_date_utc.timestamp()) / self.base_tf_seconds)
                current_profit = trade.calc_profit_ratio(current_rate)
                if trade.is_short:
                    market_side = 0
                else:
                    market_side = 1

        return market_side, current_profit, int(trade_duration)

    def predict(
        self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
    ) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
        """
        Filter the prediction features data and predict with it.
        :param unfiltered_dataframe: Full dataframe for the current backtest period.
        :return:
        :pred_df: dataframe containing the predictions
        :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
        data (NaNs) or felt uncertain about data (PCA and DI index)
        """

        dk.find_features(unfiltered_df)
        filtered_dataframe, _ = dk.filter_features(
            unfiltered_df, dk.training_features_list, training_filter=False
        )
        filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe)
        dk.data_dictionary["prediction_features"] = filtered_dataframe

        # optional additional data cleaning/analysis
        self.data_cleaning_predict(dk)

        pred_df = self.rl_model_predict(
            dk.data_dictionary["prediction_features"], dk, self.model)
        pred_df.fillna(0, inplace=True)

        return (pred_df, dk.do_predict)

    def rl_model_predict(self, dataframe: DataFrame,
                         dk: FreqaiDataKitchen, model: Any) -> DataFrame:
        """
        A helper function to make predictions in the Reinforcement learning module.
        :param dataframe: DataFrame = the dataframe of features to make the predictions on
        :param dk: FreqaiDatakitchen = data kitchen for the current pair
        :param model: Any = the trained model used to inference the features.
        """
        output = pd.DataFrame(np.zeros(len(dataframe)), columns=dk.label_list)

        def _predict(window):
            observations = dataframe.iloc[window.index]
            if self.live and self.rl_config.get('add_state_info', False):
                market_side, current_profit, trade_duration = self.get_state_info(dk.pair)
                observations['current_profit_pct'] = current_profit
                observations['position'] = market_side
                observations['trade_duration'] = trade_duration
            res, _ = model.predict(observations, deterministic=True)
            return res

        output = output.rolling(window=self.CONV_WIDTH).apply(_predict)

        return output
    def build_ohlc_price_dataframes(self, data_dictionary: dict,
                                    pair: str, dk: FreqaiDataKitchen) -> Tuple[DataFrame,
                                                                               DataFrame]:
        """
        Builds the train prices and test prices for the environment.
        """

        pair = pair.replace(':', '')
        train_df = data_dictionary["train_features"]
        test_df = data_dictionary["test_features"]

        # price data for model training and evaluation
        tf = self.config['timeframe']
        ohlc_list = [f'%-{pair}raw_open_{tf}', f'%-{pair}raw_low_{tf}',
                     f'%-{pair}raw_high_{tf}', f'%-{pair}raw_close_{tf}']
        rename_dict = {f'%-{pair}raw_open_{tf}': 'open', f'%-{pair}raw_low_{tf}': 'low',
                       f'%-{pair}raw_high_{tf}': 'high', f'%-{pair}raw_close_{tf}': 'close'}

        prices_train = train_df.filter(ohlc_list, axis=1)
        if prices_train.empty:
            raise OperationalException("Reinforcement learning module didn't find the raw prices "
                                       'assigned in populate_any_indicators. Please assign them '
                                       'with:\n'
                                       'informative[f"%-{pair}raw_close"] = informative["close"]\n'
                                       'informative[f"%-{pair}raw_open"] = informative["open"]\n'
                                       'informative[f"%-{pair}raw_high"] = informative["high"]\n'
                                       'informative[f"%-{pair}raw_low"] = informative["low"]\n')
        prices_train.rename(columns=rename_dict, inplace=True)
        prices_train.reset_index(drop=True)

        prices_test = test_df.filter(ohlc_list, axis=1)
        prices_test.rename(columns=rename_dict, inplace=True)
        prices_test.reset_index(drop=True)

        return prices_train, prices_test

    def load_model_from_disk(self, dk: FreqaiDataKitchen) -> Any:
        """
        Can be used by user if they are trying to limit_ram_usage *and*
        perform continual learning.
        For now, this is unused.
        """
        model = None
        exists = Path(dk.data_path / f"{dk.model_filename}_model").is_file()
        if exists:
            model = self.MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model")
        else:
            logger.info('No model file on disk to continue learning from.')

        return model
    def _on_stop(self):
        """
        Hook called on bot shutdown. Close SubprocVecEnv subprocesses for clean shutdown.
        """

        if self.train_env:
            self.train_env.close()

        if self.eval_env:
            self.eval_env.close()

    # Nested class which can be overridden by user to customize further
    class MyRLEnv(Base5ActionRLEnv):
        """
        User can override any function in BaseRLEnv and gym.Env. Here the user
        sets a custom reward based on profit and trade duration.
        """

        def calculate_reward(self, action: int) -> float:
            """
            An example reward function. This is the one function that users will likely
            wish to inject their own creativity into.
            :param action: int = The action made by the agent for the current candle.
            :return:
                float = the reward to give to the agent for current step (used for optimization
                of weights in NN)
            """
            # first, penalize if the action is not valid
            if not self._is_valid(action):
                return -2

            pnl = self.get_unrealized_profit()
            factor = 100.

            # reward agent for entering trades
            if (action in (Actions.Long_enter.value, Actions.Short_enter.value)
                    and self._position == Positions.Neutral):
                return 25
            # discourage agent from not entering trades
            if action == Actions.Neutral.value and self._position == Positions.Neutral:
                return -1

            max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300)
            if self._last_trade_tick:
                trade_duration = self._current_tick - self._last_trade_tick
            else:
                trade_duration = 0

            if trade_duration <= max_trade_duration:
                factor *= 1.5
            elif trade_duration > max_trade_duration:
                factor *= 0.5

            # discourage sitting in position
            if (self._position in (Positions.Short, Positions.Long) and
                    action == Actions.Neutral.value):
                return -1 * trade_duration / max_trade_duration

            # close long
            if action == Actions.Long_exit.value and self._position == Positions.Long:
                if pnl > self.profit_aim * self.rr:
                    factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
                return float(pnl * factor)

            # close short
            if action == Actions.Short_exit.value and self._position == Positions.Short:
                if pnl > self.profit_aim * self.rr:
                    factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
                return float(pnl * factor)

            return 0.


def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int,
             seed: int, train_df: DataFrame, price: DataFrame,
             monitor: bool = False,
             env_info: Dict[str, Any] = {}) -> Callable:
    """
    Utility function for multiprocessed env.

    :param env_id: (str) the environment ID
    :param num_env: (int) the number of environment you wish to have in subprocesses
    :param seed: (int) the initial seed for RNG
    :param rank: (int) index of the subprocess
    :param env_info: (dict) all required arguments to instantiate the environment.
    :return: (Callable)
    """

    def _init() -> gym.Env:

        env = MyRLEnv(df=train_df, prices=price, id=env_id, seed=seed + rank,
                      **env_info)
        if monitor:
            env = Monitor(env)
        return env
    set_random_seed(seed)
    return _init
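
make_env returns a factory rather than an environment instance, which is the shape SubprocVecEnv expects. A sketch assuming train_df, price and env_info are prepared as in set_train_and_eval_environments above:

env_fns = [make_env(MyRLEnv, f"train_env{i}", i, 42, train_df, price,
                    monitor=True, env_info=env_info) for i in range(4)]
train_env = SubprocVecEnv(env_fns)  # four environments stepped in subprocesses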

59
freqtrade/freqai/RL/TensorboardCallback.py
Normal file
@@ -0,0 +1,59 @@
from enum import Enum
from typing import Any, Dict, Type, Union

from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.logger import HParam

from freqtrade.freqai.RL.BaseEnvironment import BaseActions, BaseEnvironment


class TensorboardCallback(BaseCallback):
    """
    Custom callback for plotting additional values in tensorboard and
    episodic summary reports.
    """
    def __init__(self, verbose=1, actions: Type[Enum] = BaseActions):
        super(TensorboardCallback, self).__init__(verbose)
        self.model: Any = None
        self.logger = None  # type: Any
        self.training_env: BaseEnvironment = None  # type: ignore
        self.actions: Type[Enum] = actions

    def _on_training_start(self) -> None:
        hparam_dict = {
            "algorithm": self.model.__class__.__name__,
            "learning_rate": self.model.learning_rate,
            # "gamma": self.model.gamma,
            # "gae_lambda": self.model.gae_lambda,
            # "batch_size": self.model.batch_size,
            # "n_steps": self.model.n_steps,
        }
        metric_dict: Dict[str, Union[float, int]] = {
            "eval/mean_reward": 0,
            "rollout/ep_rew_mean": 0,
            "rollout/ep_len_mean": 0,
            "train/value_loss": 0,
            "train/explained_variance": 0,
        }
        self.logger.record(
            "hparams",
            HParam(hparam_dict, metric_dict),
            exclude=("stdout", "log", "json", "csv"),
        )

    def _on_step(self) -> bool:

        local_info = self.locals["infos"][0]
        tensorboard_metrics = self.training_env.get_attr("tensorboard_metrics")[0]

        for info in local_info:
            if info not in ["episode", "terminal_observation"]:
                self.logger.record(f"_info/{info}", local_info[info])

        for info in tensorboard_metrics:
            if info in [action.name for action in self.actions]:
                self.logger.record(f"_actions/{info}", tensorboard_metrics[info])
            else:
                self.logger.record(f"_custom/{info}", tensorboard_metrics[info])

        return True

0
freqtrade/freqai/RL/__init__.py
Normal file

@@ -95,9 +95,14 @@ class BaseClassifierModel(IFreqaiModel):
        self.data_cleaning_predict(dk)

        predictions = self.model.predict(dk.data_dictionary["prediction_features"])
        if self.CONV_WIDTH == 1:
            predictions = np.reshape(predictions, (-1, len(dk.label_list)))

        pred_df = DataFrame(predictions, columns=dk.label_list)

        predictions_prob = self.model.predict_proba(dk.data_dictionary["prediction_features"])
        if self.CONV_WIDTH == 1:
            predictions_prob = np.reshape(predictions_prob, (-1, len(self.model.classes_)))
        pred_df_prob = DataFrame(predictions_prob, columns=self.model.classes_)

        pred_df = pd.concat([pred_df, pred_df_prob], axis=1)
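
The reshape guards the CONV_WIDTH == 1 case, where predict() can return a flat vector; a quick illustration of the layout fix:

import numpy as np
preds = np.array([0.1, 0.2, 0.3])   # shape (3,) for a single label
preds = np.reshape(preds, (-1, 1))  # shape (3, 1), as DataFrame(..., columns=...) expects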

@@ -95,6 +95,9 @@ class BaseRegressionModel(IFreqaiModel):
        self.data_cleaning_predict(dk)

        predictions = self.model.predict(dk.data_dictionary["prediction_features"])
        if self.CONV_WIDTH == 1:
            predictions = np.reshape(predictions, (-1, len(dk.label_list)))

        pred_df = DataFrame(predictions, columns=dk.label_list)

        pred_df = dk.denormalize_labels_from_metadata(pred_df)

@@ -1,9 +1,10 @@
import collections
import importlib
import logging
import re
import shutil
import threading
from datetime import datetime, timezone
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Dict, Tuple, TypedDict

@@ -81,6 +82,7 @@ class FreqaiDataDrawer:
        self.historic_predictions_bkp_path = Path(
            self.full_path / "historic_predictions.backup.pkl")
        self.pair_dictionary_path = Path(self.full_path / "pair_dictionary.json")
        self.global_metadata_path = Path(self.full_path / "global_metadata.json")
        self.metric_tracker_path = Path(self.full_path / "metric_tracker.json")
        self.follow_mode = follow_mode
        if follow_mode:

@@ -98,6 +100,7 @@ class FreqaiDataDrawer:
        self.empty_pair_dict: pair_info = {
            "model_filename": "", "trained_timestamp": 0,
            "data_path": "", "extras": {}}
        self.model_type = self.freqai_info.get('model_save_type', 'joblib')

    def update_metric_tracker(self, metric: str, value: float, pair: str) -> None:
        """

@@ -125,6 +128,17 @@ class FreqaiDataDrawer:
        self.update_metric_tracker('cpu_load5min', load5 / cpus, pair)
        self.update_metric_tracker('cpu_load15min', load15 / cpus, pair)

    def load_global_metadata_from_disk(self):
        """
        Locate and load a previously saved global metadata in present model folder.
        """
        exists = self.global_metadata_path.is_file()
        if exists:
            with open(self.global_metadata_path, "r") as fp:
                metadata_dict = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
            return metadata_dict
        return {}

    def load_drawer_from_disk(self):
        """
        Locate and load a previously saved data drawer full of all pair model metadata in
@ -225,6 +239,15 @@ class FreqaiDataDrawer:
            rapidjson.dump(self.follower_dict, fp, default=self.np_encoder,
                           number_mode=rapidjson.NM_NATIVE)

    def save_global_metadata_to_disk(self, metadata: Dict[str, Any]):
        """
        Save global metadata json to disk
        """
        with self.save_lock:
            with open(self.global_metadata_path, 'w') as fp:
                rapidjson.dump(metadata, fp, default=self.np_encoder,
                               number_mode=rapidjson.NM_NATIVE)

    def create_follower_dict(self):
        """
        Create a dictionary for each follower to maintain unique persistent prediction targets
@ -476,10 +499,12 @@ class FreqaiDataDrawer:
        save_path = Path(dk.data_path)

        # Save the trained model
        if not dk.keras:
        if self.model_type == 'joblib':
            dump(model, save_path / f"{dk.model_filename}_model.joblib")
        else:
        elif self.model_type == 'keras':
            model.save(save_path / f"{dk.model_filename}_model.h5")
        elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type:
            model.save(save_path / f"{dk.model_filename}_model.zip")

        if dk.svm_model is not None:
            dump(dk.svm_model, save_path / f"{dk.model_filename}_svm_model.joblib")
@ -506,11 +531,10 @@ class FreqaiDataDrawer:
                dk.pca, open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "wb")
            )

        # if self.live:
        # store as much in ram as possible to increase performance
        self.model_dictionary[coin] = model
        self.pair_dict[coin]["model_filename"] = dk.model_filename
        self.pair_dict[coin]["data_path"] = str(dk.data_path)

        if coin not in self.meta_data_dictionary:
            self.meta_data_dictionary[coin] = {}
        self.meta_data_dictionary[coin]["train_df"] = dk.data_dictionary["train_features"]
@ -542,14 +566,6 @@ class FreqaiDataDrawer:
        if dk.live:
            dk.model_filename = self.pair_dict[coin]["model_filename"]
            dk.data_path = Path(self.pair_dict[coin]["data_path"])
            if self.freqai_info.get("follow_mode", False):
                # follower can be on a different system which is rsynced from the leader:
                dk.data_path = Path(
                    self.config["user_data_dir"]
                    / "models"
                    / dk.data_path.parts[-2]
                    / dk.data_path.parts[-1]
                )

        if coin in self.meta_data_dictionary:
            dk.data = self.meta_data_dictionary[coin]["meta_data"]
@ -568,12 +584,16 @@ class FreqaiDataDrawer:
        # try to access model in memory instead of loading object from disk to save time
        if dk.live and coin in self.model_dictionary:
            model = self.model_dictionary[coin]
        elif not dk.keras:
        elif self.model_type == 'joblib':
            model = load(dk.data_path / f"{dk.model_filename}_model.joblib")
        else:
        elif self.model_type == 'keras':
            from tensorflow import keras

            model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5")
        elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type:
            mod = importlib.import_module(
                self.model_type, self.freqai_info['rl_config']['model_type'])
            MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type'])
            model = MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model")

        if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file():
            dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib")
@ -583,6 +603,10 @@ class FreqaiDataDrawer:
                f"Unable to load model, ensure model exists at " f"{dk.data_path} "
            )

        # load it into ram if it was loaded from disk
        if coin not in self.model_dictionary:
            self.model_dictionary[coin] = model

        if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]:
            dk.pca = cloudpickle.load(
                open(dk.data_path / f"{dk.model_filename}_pca_object.pkl", "rb")
@ -693,3 +717,31 @@ class FreqaiDataDrawer:
        ).reset_index(drop=True)

        return corr_dataframes, base_dataframes

    def get_timerange_from_live_historic_predictions(self) -> TimeRange:
        """
        Returns timerange information based on the historic predictions file
        :return: timerange calculated from saved live data
        """
        if not self.historic_predictions_path.is_file():
            raise OperationalException(
                'Historic predictions not found. Historic predictions data is required '
                'to run backtest with the freqai-backtest-live-models option '
            )

        self.load_historic_predictions_from_disk()

        all_pairs_end_dates = []
        for pair in self.historic_predictions:
            pair_historic_data = self.historic_predictions[pair]
            all_pairs_end_dates.append(pair_historic_data.date_pred.max())

        global_metadata = self.load_global_metadata_from_disk()
        start_date = datetime.fromtimestamp(int(global_metadata["start_dry_live_date"]))
        end_date = max(all_pairs_end_dates)
        # add 1 day to the timerange to ensure the BT module loads all dataframe data
        end_date = end_date + timedelta(days=1)
        backtesting_timerange = TimeRange(
            'date', 'date', int(start_date.timestamp()), int(end_date.timestamp())
        )
        return backtesting_timerange
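The new drawer helper above reduces to simple timestamp arithmetic: start at the recorded dry/live start date, end one day after the newest saved prediction. A sketch with invented values:

```python
# Sketch of the timerange derivation; all values are illustrative.
from datetime import datetime, timedelta, timezone

start_dry_live_date = 1670000000  # as stored in global_metadata.json
pair_end_dates = [datetime(2022, 12, 10, tzinfo=timezone.utc),
                  datetime(2022, 12, 12, tzinfo=timezone.utc)]

start_date = datetime.fromtimestamp(start_dry_live_date, tz=timezone.utc)
end_date = max(pair_end_dates) + timedelta(days=1)  # pad so BT loads all data
timerange = (int(start_date.timestamp()), int(end_date.timestamp()))
```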
@ -1,7 +1,7 @@
import copy
import logging
import shutil
from datetime import datetime, timedelta, timezone
from datetime import datetime, timezone
from math import cos, sin
from pathlib import Path
from typing import Any, Dict, List, Tuple
@ -9,6 +9,7 @@ from typing import Any, Dict, List, Tuple
import numpy as np
import numpy.typing as npt
import pandas as pd
import psutil
from pandas import DataFrame
from scipy import stats
from sklearn import linear_model
@ -86,12 +87,7 @@ class FreqaiDataKitchen:
        if not self.live:
            self.full_path = self.get_full_models_path(self.config)

            if self.backtest_live_models:
                if self.pair:
                    self.set_timerange_from_ready_models()
                    (self.training_timeranges,
                     self.backtesting_timeranges) = self.split_timerange_live_models()
            else:
            if not self.backtest_live_models:
                self.full_timerange = self.create_fulltimerange(
                    self.config["timerange"], self.freqai_config.get("train_period_days", 0)
                )
@ -102,7 +98,10 @@ class FreqaiDataKitchen:
            )

        self.data['extra_returns_per_train'] = self.freqai_config.get('extra_returns_per_train', {})
        self.thread_count = self.freqai_config.get("data_kitchen_thread_count", -1)
        if not self.freqai_config.get("data_kitchen_thread_count", 0):
            self.thread_count = max(int(psutil.cpu_count() * 2 - 2), 1)
        else:
            self.thread_count = self.freqai_config["data_kitchen_thread_count"]
        self.train_dates: DataFrame = pd.DataFrame()
        self.unique_classes: Dict[str, list] = {}
        self.unique_class_list: list = []
@ -456,29 +455,6 @@ class FreqaiDataKitchen:
        # print(tr_training_list, tr_backtesting_list)
        return tr_training_list_timerange, tr_backtesting_list_timerange

    def split_timerange_live_models(
        self
    ) -> Tuple[list, list]:

        tr_backtesting_list_timerange = []
        asset = self.pair.split("/")[0]
        if asset not in self.backtest_live_models_data["assets_end_dates"]:
            raise OperationalException(
                f"Model not available for pair {self.pair}. "
                "Please, try again after removing this pair from the configuration file."
            )
        asset_data = self.backtest_live_models_data["assets_end_dates"][asset]
        backtesting_timerange = self.backtest_live_models_data["backtesting_timerange"]
        model_end_dates = [x for x in asset_data]
        model_end_dates.append(backtesting_timerange.stopts)
        model_end_dates.sort()
        for index, item in enumerate(model_end_dates):
            if len(model_end_dates) > (index + 1):
                tr_to_add = TimeRange("date", "date", item, model_end_dates[index + 1])
                tr_backtesting_list_timerange.append(tr_to_add)

        return tr_backtesting_list_timerange, tr_backtesting_list_timerange

    def slice_dataframe(self, timerange: TimeRange, df: DataFrame) -> DataFrame:
        """
        Given a full dataframe, extract the user desired window
@ -486,10 +462,10 @@ class FreqaiDataKitchen:
        :param df: Dataframe containing all candles to run the entire backtest. Here
                   it is sliced down to just the present training period.
        """
        df = df.loc[df["date"] >= timerange.startdt, :]
        if not self.live:
            df = df.loc[df["date"] < timerange.stopdt, :]
            df = df.loc[(df["date"] >= timerange.startdt) & (df["date"] < timerange.stopdt), :]
        else:
            df = df.loc[df["date"] >= timerange.startdt, :]

        return df
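The rewritten slice above combines both date bounds into a single boolean mask. The same idiom on a toy dataframe (data is illustrative):

```python
# Sketch: slicing a candle dataframe down to one training window.
import pandas as pd

df = pd.DataFrame({"date": pd.date_range("2022-12-01", periods=10, freq="D"),
                   "close": range(10)})
startdt, stopdt = pd.Timestamp("2022-12-03"), pd.Timestamp("2022-12-07")
# both bound checks in one .loc call, as in the new code
window = df.loc[(df["date"] >= startdt) & (df["date"] < stopdt), :]
```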
@ -974,7 +950,8 @@ class FreqaiDataKitchen:
        return weights

    def get_predictions_to_append(self, predictions: DataFrame,
                                  do_predict: npt.ArrayLike) -> DataFrame:
                                  do_predict: npt.ArrayLike,
                                  dataframe_backtest: DataFrame) -> DataFrame:
        """
        Get backtest prediction from current backtest period
        """
@ -996,7 +973,9 @@ class FreqaiDataKitchen:
        if self.freqai_config["feature_parameters"].get("DI_threshold", 0) > 0:
            append_df["DI_values"] = self.DI_values

        return append_df
        dataframe_backtest.reset_index(drop=True, inplace=True)
        merged_df = pd.concat([dataframe_backtest["date"], append_df], axis=1)
        return merged_df

    def append_predictions(self, append_df: DataFrame) -> None:
        """
@ -1006,23 +985,18 @@ class FreqaiDataKitchen:
        if self.full_df.empty:
            self.full_df = append_df
        else:
            self.full_df = pd.concat([self.full_df, append_df], axis=0)
            self.full_df = pd.concat([self.full_df, append_df], axis=0, ignore_index=True)

    def fill_predictions(self, dataframe):
        """
        Back fill values to before the backtesting range so that the dataframe matches size
        when it goes back to the strategy. These rows are not included in the backtest.
        """
        len_filler = len(dataframe) - len(self.full_df.index)  # startup_candle_count
        filler_df = pd.DataFrame(
            np.zeros((len_filler, len(self.full_df.columns))), columns=self.full_df.columns
        )

        self.full_df = pd.concat([filler_df, self.full_df], axis=0, ignore_index=True)

        to_keep = [col for col in dataframe.columns if not col.startswith("&")]
        self.return_dataframe = pd.concat([dataframe[to_keep], self.full_df], axis=1)
        self.return_dataframe = pd.merge(dataframe[to_keep],
                                         self.full_df, how='left', on='date')
        self.return_dataframe[self.full_df.columns] = (
            self.return_dataframe[self.full_df.columns].fillna(value=0))
        self.full_df = DataFrame()

        return
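fill_predictions now aligns predictions to candles by merging on `date` rather than concatenating by position. A sketch of that join with invented data:

```python
# Sketch: left-merge prediction columns onto the strategy dataframe and
# zero-fill the startup rows that have no prediction. Data is illustrative.
import pandas as pd

dataframe = pd.DataFrame({"date": pd.date_range("2022-12-01", periods=5)})
full_df = pd.DataFrame({"date": pd.date_range("2022-12-03", periods=3),
                        "&s-prediction": [0.1, -0.2, 0.3]})

merged = pd.merge(dataframe, full_df, how="left", on="date")
pred_cols = [c for c in full_df.columns if c != "date"]
merged[pred_cols] = merged[pred_cols].fillna(value=0)
```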
@ -1319,22 +1293,22 @@ class FreqaiDataKitchen:
        self, append_df: DataFrame
    ) -> None:
        """
        Save prediction dataframe from backtesting to h5 file format
        Save prediction dataframe from backtesting to feather file format
        :param append_df: dataframe for backtesting period
        """
        full_predictions_folder = Path(self.full_path / self.backtest_predictions_folder)
        if not full_predictions_folder.is_dir():
            full_predictions_folder.mkdir(parents=True, exist_ok=True)

        append_df.to_hdf(self.backtesting_results_path, key='append_df', mode='w')
        append_df.to_feather(self.backtesting_results_path)

    def get_backtesting_prediction(
        self
    ) -> DataFrame:
        """
        Get prediction dataframe from h5 file format
        Get prediction dataframe from feather file format
        """
        append_df = pd.read_hdf(self.backtesting_results_path)
        append_df = pd.read_feather(self.backtesting_results_path)
        return append_df

    def check_if_backtest_prediction_is_valid(
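These two hunks swap HDF5 for feather. Feather requires pyarrow and a default index, which is consistent with append_predictions now passing ignore_index=True. A round-trip sketch with an illustrative filename:

```python
# Sketch of the feather round trip; path and data are invented.
import pandas as pd

append_df = pd.DataFrame({"date": pd.date_range("2022-12-01", periods=3),
                          "&s-prediction": [0.1, 0.2, 0.3]})
append_df.to_feather("sub-train-BTC_1670000000_prediction.feather")
restored = pd.read_feather("sub-train-BTC_1670000000_prediction.feather")
assert restored.equals(append_df)  # dtypes and values survive the round trip
```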
@ -1350,19 +1324,20 @@ class FreqaiDataKitchen:
        """
        path_to_predictionfile = Path(self.full_path /
                                      self.backtest_predictions_folder /
                                      f"{self.model_filename}_prediction.h5")
                                      f"{self.model_filename}_prediction.feather")
        self.backtesting_results_path = path_to_predictionfile

        file_exists = path_to_predictionfile.is_file()

        if file_exists:
            append_df = self.get_backtesting_prediction()
            if len(append_df) == len_backtest_df:
            if len(append_df) == len_backtest_df and 'date' in append_df:
                logger.info(f"Found backtesting prediction file at {path_to_predictionfile}")
                return True
            else:
                logger.info("A new backtesting prediction file is required. "
                            "(Number of predictions is different from dataframe length).")
                            "(Number of predictions is different from dataframe length or "
                            "old prediction file version).")
                return False
        else:
            logger.info(
@ -1370,17 +1345,6 @@ class FreqaiDataKitchen:
            )
            return False

    def set_timerange_from_ready_models(self):
        backtesting_timerange, \
            assets_end_dates = (
                self.get_timerange_and_assets_end_dates_from_ready_models(self.full_path))

        self.backtest_live_models_data = {
            "backtesting_timerange": backtesting_timerange,
            "assets_end_dates": assets_end_dates
        }
        return

    def get_full_models_path(self, config: Config) -> Path:
        """
        Returns default FreqAI model path
@ -1391,88 +1355,6 @@ class FreqaiDataKitchen:
            config["user_data_dir"] / "models" / str(freqai_config.get("identifier"))
        )

    def get_timerange_and_assets_end_dates_from_ready_models(
            self, models_path: Path) -> Tuple[TimeRange, Dict[str, Any]]:
        """
        Returns timerange information based on a FreqAI model directory
        :param models_path: FreqAI model path

        :return: a Tuple with (the timerange calculated from the directory and
            a Dict with pair and model end training dates info)
        """
        all_models_end_dates = []
        assets_end_dates: Dict[str, Any] = self.get_assets_timestamps_training_from_ready_models(
            models_path)
        for key in assets_end_dates:
            for model_end_date in assets_end_dates[key]:
                if model_end_date not in all_models_end_dates:
                    all_models_end_dates.append(model_end_date)

        if len(all_models_end_dates) == 0:
            raise OperationalException(
                'At least 1 saved model is required to '
                'run backtest with the freqai-backtest-live-models option'
            )

        if len(all_models_end_dates) == 1:
            logger.warning(
                "Only 1 model was found. Backtesting will run with the "
                "timerange from the end of the training date to the current date"
            )

        finish_timestamp = int(datetime.now(tz=timezone.utc).timestamp())
        if len(all_models_end_dates) > 1:
            # After the last model end date, use the same period as the previous model
            # to finish the backtest
            all_models_end_dates.sort(reverse=True)
            finish_timestamp = all_models_end_dates[0] + \
                (all_models_end_dates[0] - all_models_end_dates[1])

        all_models_end_dates.append(finish_timestamp)
        all_models_end_dates.sort()
        start_date = (datetime(*datetime.fromtimestamp(min(all_models_end_dates),
                      timezone.utc).timetuple()[:3], tzinfo=timezone.utc))
        end_date = (datetime(*datetime.fromtimestamp(max(all_models_end_dates),
                    timezone.utc).timetuple()[:3], tzinfo=timezone.utc))

        # add 1 day to the timerange to ensure the BT module loads all dataframe data
        end_date = end_date + timedelta(days=1)
        backtesting_timerange = TimeRange(
            'date', 'date', int(start_date.timestamp()), int(end_date.timestamp())
        )
        return backtesting_timerange, assets_end_dates

    def get_assets_timestamps_training_from_ready_models(
            self, models_path: Path) -> Dict[str, Any]:
        """
        Scan the models path and return all assets' end training dates (timestamps)
        :param models_path: FreqAI model path

        :return: a Dict with asset and model end training dates info
        """
        assets_end_dates: Dict[str, Any] = {}
        if not models_path.is_dir():
            raise OperationalException(
                'Model folders not found. Saved models are required '
                'to run backtest with the freqai-backtest-live-models option'
            )
        for model_dir in models_path.iterdir():
            if str(model_dir.name).startswith("sub-train"):
                model_end_date = int(model_dir.name.split("_")[1])
                asset = model_dir.name.split("_")[0].replace("sub-train-", "")
                model_file_name = (
                    f"cb_{str(model_dir.name).replace('sub-train-', '').lower()}"
                    "_model.joblib"
                )

                model_path_file = Path(model_dir / model_file_name)
                if model_path_file.is_file():
                    if asset not in assets_end_dates:
                        assets_end_dates[asset] = []
                    assets_end_dates[asset].append(model_end_date)

        return assets_end_dates

    def remove_special_chars_from_feature_names(self, dataframe: pd.DataFrame) -> pd.DataFrame:
        """
        Remove all special characters from feature strings (:)
@ -5,15 +5,17 @@ from abc import ABC, abstractmethod
from collections import deque
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Literal, Tuple
from typing import Any, Dict, List, Literal, Optional, Tuple

import numpy as np
import pandas as pd
import psutil
from numpy.typing import NDArray
from pandas import DataFrame

from freqtrade.configuration import TimeRange
from freqtrade.constants import Config
from freqtrade.data.dataprovider import DataProvider
from freqtrade.enums import RunMode
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_seconds
@ -67,6 +69,7 @@ class IFreqaiModel(ABC):
        self.save_backtest_models: bool = self.freqai_info.get("save_backtest_models", True)
        if self.save_backtest_models:
            logger.info('Backtesting module configured to save all models.')

        self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode)
        # set current candle to arbitrary historical date
        self.current_candle: datetime = datetime.fromtimestamp(637887600, tz=timezone.utc)
@ -98,6 +101,10 @@ class IFreqaiModel(ABC):
        self.get_corr_dataframes: bool = True
        self._threads: List[threading.Thread] = []
        self._stop_event = threading.Event()
        self.metadata: Dict[str, Any] = self.dd.load_global_metadata_from_disk()
        self.data_provider: Optional[DataProvider] = None
        self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1)
        self.can_short = True  # overridden in start() with strategy.can_short

        record_params(config, self.full_path)
@ -126,11 +133,14 @@ class IFreqaiModel(ABC):
        self.live = strategy.dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE)
        self.dd.set_pair_dict_info(metadata)
        self.data_provider = strategy.dp
        self.can_short = strategy.can_short

        if self.live:
            self.inference_timer('start')
            self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
            dk = self.start_live(dataframe, metadata, strategy, self.dk)
            dataframe = dk.remove_features_from_df(dk.return_dataframe)

        # For backtesting, each pair enters and then gets trained for each window along the
        # sliding window defined by "train_period_days" (training window) and "live_retrain_hours"
@ -139,20 +149,24 @@ class IFreqaiModel(ABC):
        # the concatenated results for the full backtesting period back to the strategy.
        elif not self.follow_mode:
            self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"])
            if self.dk.backtest_live_models:
                logger.info(
                    f"Backtesting {len(self.dk.backtesting_timeranges)} timeranges (live models)")
            else:
                logger.info(f"Training {len(self.dk.training_timeranges)} timeranges")
            dataframe = self.dk.use_strategy_to_populate_indicators(
                strategy, prediction_dataframe=dataframe, pair=metadata["pair"]
            )
            dk = self.start_backtesting(dataframe, metadata, self.dk)
            if not self.config.get("freqai_backtest_live_models", False):
                logger.info(f"Training {len(self.dk.training_timeranges)} timeranges")
                dk = self.start_backtesting(dataframe, metadata, self.dk)
                dataframe = dk.remove_features_from_df(dk.return_dataframe)
            else:
                logger.info(
                    "Backtesting using historic predictions (live models)")
                dk = self.start_backtesting_from_historic_predictions(
                    dataframe, metadata, self.dk)
                dataframe = dk.return_dataframe

        dataframe = dk.remove_features_from_df(dk.return_dataframe)
        self.clean_up()
        if self.live:
            self.inference_timer('stop', metadata["pair"])

        return dataframe

    def clean_up(self):
@ -164,6 +178,13 @@ class IFreqaiModel(ABC):
        self.model = None
        self.dk = None

    def _on_stop(self):
        """
        Callback for subclasses to override to include logic for shutting down resources
        when SIGINT is sent.
        """
        return

    def shutdown(self):
        """
        Cleans up threads on shutdown, sets the stop event. Joins threads to wait
@ -172,6 +193,9 @@ class IFreqaiModel(ABC):
        logger.info("Stopping FreqAI")
        self._stop_event.set()

        self.data_provider = None
        self._on_stop()

        logger.info("Waiting on Training iteration")
        for _thread in self._threads:
            _thread.join()
@ -260,10 +284,10 @@ class IFreqaiModel(ABC):
            train_it += 1
            total_trains = len(dk.backtesting_timeranges)
            self.training_timerange = tr_train
            dataframe_train = dk.slice_dataframe(tr_train, dataframe)
            dataframe_backtest = dk.slice_dataframe(tr_backtest, dataframe)
            len_backtest_df = len(dataframe.loc[(dataframe["date"] >= tr_backtest.startdt) & (
                dataframe["date"] < tr_backtest.stopdt), :])

            if not self.ensure_data_exists(dataframe_backtest, tr_backtest, pair):
            if not self.ensure_data_exists(len_backtest_df, tr_backtest, pair):
                continue

            self.log_backtesting_progress(tr_train, pair, train_it, total_trains)
@ -276,13 +300,15 @@ class IFreqaiModel(ABC):
            dk.set_new_model_names(pair, timestamp_model_id)

            if dk.check_if_backtest_prediction_is_valid(len(dataframe_backtest)):
            if dk.check_if_backtest_prediction_is_valid(len_backtest_df):
                self.dd.load_metadata(dk)
                dk.find_features(dataframe_train)
                dk.find_features(dataframe)
                self.check_if_feature_list_matches_strategy(dk)
                append_df = dk.get_backtesting_prediction()
                dk.append_predictions(append_df)
            else:
                dataframe_train = dk.slice_dataframe(tr_train, dataframe)
                dataframe_backtest = dk.slice_dataframe(tr_backtest, dataframe)
                if not self.model_exists(dk):
                    dk.find_features(dataframe_train)
                    dk.find_labels(dataframe_train)
@ -301,10 +327,11 @@ class IFreqaiModel(ABC):
                    self.model = self.dd.load_data(pair, dk)

                pred_df, do_preds = self.predict(dataframe_backtest, dk)
                append_df = dk.get_predictions_to_append(pred_df, do_preds)
                append_df = dk.get_predictions_to_append(pred_df, do_preds, dataframe_backtest)
                dk.append_predictions(append_df)
                dk.save_backtesting_prediction(append_df)

        self.backtesting_fit_live_predictions(dk)
        dk.fill_predictions(dataframe)

        return dk
@ -617,6 +644,8 @@ class IFreqaiModel(ABC):
            self.dd.historic_predictions[pair] = pred_df
            hist_preds_df = self.dd.historic_predictions[pair]

            self.set_start_dry_live_date(strat_df)

        for label in hist_preds_df.columns:
            if hist_preds_df[label].dtype == object:
                continue
@ -657,7 +686,8 @@ class IFreqaiModel(ABC):
        for label in full_labels:
            if self.dd.historic_predictions[dk.pair][label].dtype == object:
                continue
            f = spy.stats.norm.fit(self.dd.historic_predictions[dk.pair][label].tail(num_candles))
            f = spy.stats.norm.fit(
                self.dd.historic_predictions[dk.pair][label].tail(num_candles))
            dk.data["labels_mean"][label], dk.data["labels_std"][label] = f[0], f[1]

        return
@ -778,16 +808,16 @@ class IFreqaiModel(ABC):
        self.pair_it = 1
        self.current_candle = self.dd.current_candle

    def ensure_data_exists(self, dataframe_backtest: DataFrame,
    def ensure_data_exists(self, len_dataframe_backtest: int,
                           tr_backtest: TimeRange, pair: str) -> bool:
        """
        Check if the dataframe is empty; if not, report useful information to the user.
        :param dataframe_backtest: the backtesting dataframe, may be empty.
        :param len_dataframe_backtest: the length of the backtesting dataframe
        :param tr_backtest: current backtesting timerange.
        :param pair: current pair
        :return: if the data exists or not
        """
        if self.config.get("freqai_backtest_live_models", False) and len(dataframe_backtest) == 0:
        if self.config.get("freqai_backtest_live_models", False) and len_dataframe_backtest == 0:
            logger.info(f"No data found for pair {pair} "
                        f"from {tr_backtest.start_fmt} to {tr_backtest.stop_fmt}. "
                        "Probably more than one training within the same candle period.")
@ -811,6 +841,81 @@ class IFreqaiModel(ABC):
            f"to {tr_train.stop_fmt}, {train_it}/{total_trains} "
            "trains"
        )

    def backtesting_fit_live_predictions(self, dk: FreqaiDataKitchen):
        """
        Apply the fit_live_predictions function in backtesting with a dummy historic_predictions.
        The loop is required to simulate dry/live operation, as it is not possible to predict
        the type of logic implemented by the user.
        :param dk: datakitchen object
        """
        fit_live_predictions_candles = self.freqai_info.get("fit_live_predictions_candles", 0)
        if fit_live_predictions_candles:
            logger.info("Applying fit_live_predictions in backtesting")
            label_columns = [col for col in dk.full_df.columns if (
                col.startswith("&") and
                not (col.startswith("&") and col.endswith("_mean")) and
                not (col.startswith("&") and col.endswith("_std")) and
                col not in self.dk.data["extra_returns_per_train"])
            ]

            for index in range(len(dk.full_df)):
                if index >= fit_live_predictions_candles:
                    self.dd.historic_predictions[self.dk.pair] = (
                        dk.full_df.iloc[index - fit_live_predictions_candles:index])
                    self.fit_live_predictions(self.dk, self.dk.pair)
                    for label in label_columns:
                        if dk.full_df[label].dtype == object:
                            continue
                        if "labels_mean" in self.dk.data:
                            dk.full_df.at[index, f"{label}_mean"] = (
                                self.dk.data["labels_mean"][label])
                        if "labels_std" in self.dk.data:
                            dk.full_df.at[index, f"{label}_std"] = self.dk.data["labels_std"][label]

                    for extra_col in self.dk.data["extra_returns_per_train"]:
                        dk.full_df.at[index, f"{extra_col}"] = (
                            self.dk.data["extra_returns_per_train"][extra_col])

        return

    def update_metadata(self, metadata: Dict[str, Any]):
        """
        Update global metadata and save the updated json file
        :param metadata: new global metadata dict
        """
        self.dd.save_global_metadata_to_disk(metadata)
        self.metadata = metadata

    def set_start_dry_live_date(self, live_dataframe: DataFrame):
        key_name = "start_dry_live_date"
        if key_name not in self.metadata:
            metadata = self.metadata
            metadata[key_name] = int(
                pd.to_datetime(live_dataframe.tail(1)["date"].values[0]).timestamp())
            self.update_metadata(metadata)

    def start_backtesting_from_historic_predictions(
        self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen
    ) -> FreqaiDataKitchen:
        """
        :param dataframe: DataFrame = strategy-passed dataframe
        :param metadata: Dict = pair metadata
        :param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only
        :return:
            FreqaiDataKitchen = Data management/analysis tool associated to present pair only
        """
        pair = metadata["pair"]
        dk.return_dataframe = dataframe
        saved_dataframe = self.dd.historic_predictions[pair]
        columns_to_drop = list(set(saved_dataframe.columns).intersection(
            dk.return_dataframe.columns))
        dk.return_dataframe = dk.return_dataframe.drop(columns=list(columns_to_drop))
        dk.return_dataframe = pd.merge(
            dk.return_dataframe, saved_dataframe, how='left', left_on='date', right_on="date_pred")
        # dk.return_dataframe = dk.return_dataframe[saved_dataframe.columns].fillna(0)
        return dk

    # The following methods are overridden by user-made prediction models.
    # See freqai/prediction_models/CatboostPredictionModel.py for an example.
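start_backtesting_from_historic_predictions boils down to dropping overlapping columns and left-joining the saved predictions onto the strategy dataframe. A standalone sketch with invented frames:

```python
# Sketch: merge saved live predictions back onto strategy candles by date.
import pandas as pd

strategy_df = pd.DataFrame({"date": pd.date_range("2022-12-01", periods=4),
                            "close": [1.0, 1.1, 1.2, 1.3]})
saved = pd.DataFrame({"date_pred": pd.date_range("2022-12-02", periods=2),
                      "&s-prediction": [0.5, -0.5]})

# drop any columns the two frames share, then join on the candle date
overlap = set(saved.columns).intersection(strategy_df.columns)
strategy_df = strategy_df.drop(columns=list(overlap))
result = pd.merge(strategy_df, saved, how="left",
                  left_on="date", right_on="date_pred")
```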
145
freqtrade/freqai/prediction_models/ReinforcementLearner.py
Normal file
@ -0,0 +1,145 @@
import logging
from pathlib import Path
from typing import Any, Dict

import torch as th

from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions
from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel


logger = logging.getLogger(__name__)


class ReinforcementLearner(BaseReinforcementLearningModel):
    """
    Reinforcement Learning Model prediction model.

    Users can inherit from this class to make their own RL model with custom
    environment/training controls. Define the file as follows:

    ```
    from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner

    class MyCoolRLModel(ReinforcementLearner):
    ```

    Save the file to `user_data/freqaimodels`, then run it with:

    freqtrade trade --freqaimodel MyCoolRLModel --config config.json --strategy SomeCoolStrat

    Here the users can override any of the functions
    available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this
    is where the user overrides `MyRLEnv` (see below), to define a custom
    `calculate_reward()` function, or to override any other parts of the environment.

    This class also allows users to override any other part of the IFreqaiModel tree.
    For example, the user can override `def fit()` or `def train()` or `def predict()`
    to take fine-tuned control over these processes.

    Another common override may be `def data_cleaning_predict()` where the user can
    take fine-tuned control over the data handling pipeline.
    """

    def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs):
        """
        User customizable fit method
        :param data_dictionary: dict = common data dictionary containing all train/test
            features/labels/weights.
        :param dk: FreqaiDatakitchen = data kitchen for current pair.
        :return:
            model Any = trained model to be used for inference in dry/live/backtesting
        """
        train_df = data_dictionary["train_features"]
        total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df)

        policy_kwargs = dict(activation_fn=th.nn.ReLU,
                             net_arch=self.net_arch)

        if dk.pair not in self.dd.model_dictionary or not self.continual_learning:
            model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs,
                                    tensorboard_log=Path(
                                        dk.full_path / "tensorboard" / dk.pair.split('/')[0]),
                                    **self.freqai_info.get('model_training_parameters', {})
                                    )
        else:
            logger.info('Continual training activated - starting training from previously '
                        'trained agent.')
            model = self.dd.model_dictionary[dk.pair]
            model.set_env(self.train_env)

        model.learn(
            total_timesteps=int(total_timesteps),
            callback=[self.eval_callback, self.tensorboard_callback]
        )

        if Path(dk.data_path / "best_model.zip").is_file():
            logger.info('Callback found a best model.')
            best_model = self.MODELCLASS.load(dk.data_path / "best_model")
            return best_model

        logger.info("Couldn't find best model, using final model instead.")

        return model

    class MyRLEnv(Base5ActionRLEnv):
        """
        User can override any function in BaseRLEnv and gym.Env. Here the user
        sets a custom reward based on profit and trade duration.
        """

        def calculate_reward(self, action: int) -> float:
            """
            An example reward function. This is the one function that users will likely
            wish to inject their own creativity into.
            :param action: int = The action made by the agent for the current candle.
            :return:
                float = the reward to give to the agent for the current step (used for
                optimization of weights in NN)
            """
            # first, penalize if the action is not valid
            if not self._is_valid(action):
                self.tensorboard_log("is_valid")
                return -2

            pnl = self.get_unrealized_profit()
            factor = 100.

            # reward agent for entering trades
            if (action == Actions.Long_enter.value
                    and self._position == Positions.Neutral):
                return 25
            if (action == Actions.Short_enter.value
                    and self._position == Positions.Neutral):
                return 25
            # discourage agent from not entering trades
            if action == Actions.Neutral.value and self._position == Positions.Neutral:
                return -1

            max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300)
            trade_duration = self._current_tick - self._last_trade_tick  # type: ignore

            if trade_duration <= max_trade_duration:
                factor *= 1.5
            elif trade_duration > max_trade_duration:
                factor *= 0.5

            # discourage sitting in position
            if (self._position in (Positions.Short, Positions.Long) and
                    action == Actions.Neutral.value):
                return -1 * trade_duration / max_trade_duration

            # close long
            if action == Actions.Long_exit.value and self._position == Positions.Long:
                if pnl > self.profit_aim * self.rr:
                    factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
                return float(pnl * factor)

            # close short
            if action == Actions.Short_exit.value and self._position == Positions.Short:
                if pnl > self.profit_aim * self.rr:
                    factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
                return float(pnl * factor)

            return 0.
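fit() above depends on stable-baselines3's save/load round trip for best_model.zip. A minimal sketch of that round trip on a toy environment (PPO and CartPole are stand-ins for the user's agent, not freqtrade specifics):

```python
# Sketch: the save/load round trip behind best_model handling.
import gym
from stable_baselines3 import PPO

model = PPO("MlpPolicy", gym.make("CartPole-v1"), verbose=0)
model.learn(total_timesteps=256)
model.save("best_model")             # writes best_model.zip
best_model = PPO.load("best_model")  # what fit() returns when the file exists
```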
@ -0,0 +1,57 @@
import logging
from typing import Any, Dict

from pandas import DataFrame
from stable_baselines3.common.callbacks import EvalCallback
from stable_baselines3.common.vec_env import SubprocVecEnv

from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env
from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback


logger = logging.getLogger(__name__)


class ReinforcementLearner_multiproc(ReinforcementLearner):
    """
    Demonstration of how to build vectorized environments
    """

    def set_train_and_eval_environments(self, data_dictionary: Dict[str, Any],
                                        prices_train: DataFrame, prices_test: DataFrame,
                                        dk: FreqaiDataKitchen):
        """
        User can override this if they are using a custom MyRLEnv
        :param data_dictionary: dict = common data dictionary containing train and test
            features/labels/weights.
        :param prices_train/test: DataFrame = dataframe comprised of the prices to be used in
            the environment during training or testing
        :param dk: FreqaiDataKitchen = the datakitchen for the current pair
        """
        train_df = data_dictionary["train_features"]
        test_df = data_dictionary["test_features"]

        env_info = self.pack_env_dict()

        env_id = "train_env"
        self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1,
                                                 train_df, prices_train,
                                                 monitor=True,
                                                 env_info=env_info) for i
                                        in range(self.max_threads)])

        eval_env_id = 'eval_env'
        self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1,
                                                test_df, prices_test,
                                                monitor=True,
                                                env_info=env_info) for i
                                       in range(self.max_threads)])
        self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
                                          render=False, eval_freq=len(train_df),
                                          best_model_save_path=str(dk.data_path))

        actions = self.train_env.env_method("get_actions")[0]
        self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions)
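The multiproc model fans the environment out across worker processes with SubprocVecEnv. The same pattern in isolation, with a toy environment factory (CartPole is a stand-in for the trading environment):

```python
# Sketch: one environment factory per worker process.
import gym
from stable_baselines3.common.vec_env import SubprocVecEnv


def make_env():
    def _init():
        # each worker builds its own copy of the environment
        return gym.make("CartPole-v1")
    return _init


if __name__ == "__main__":  # required on platforms that spawn processes
    train_env = SubprocVecEnv([make_env() for _ in range(4)])
```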
@ -14,6 +14,7 @@ from freqtrade.data.history.history_utils import refresh_backtest_ohlcv_data
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_seconds
from freqtrade.exchange.exchange import market_is_active
from freqtrade.freqai.data_drawer import FreqaiDataDrawer
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist
@ -229,5 +230,6 @@ def get_timerange_backtest_live_models(config: Config) -> str:
    """
    dk = FreqaiDataKitchen(config)
    models_path = dk.get_full_models_path(config)
    timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path)
    dd = FreqaiDataDrawer(models_path, config)
    timerange = dd.get_timerange_from_live_historic_predictions()
    return timerange.timerange_str
@ -155,6 +155,8 @@ class FreqtradeBot(LoggingMixin):
            self.cancel_all_open_orders()

            self.check_for_open_trades()
        except Exception as e:
            logger.warning(f'Exception during cleanup: {e.__class__.__name__} {e}')

        finally:
            self.strategy.ft_bot_cleanup()
@ -162,8 +164,13 @@ class FreqtradeBot(LoggingMixin):
        self.rpc.cleanup()
        if self.emc:
            self.emc.shutdown()
        Trade.commit()
        self.exchange.close()
        try:
            Trade.commit()
        except Exception:
            # Exceptions here will happen if the db disappeared.
            # At that point we can no longer commit anyway.
            pass

    def startup(self) -> None:
        """
@ -905,6 +912,7 @@ class FreqtradeBot(LoggingMixin):
            stake_amount=stake_amount,
            min_stake_amount=min_stake_amount,
            max_stake_amount=max_stake_amount,
            trade_amount=trade.stake_amount if trade else None,
        )

        return enter_limit_requested, stake_amount, leverage
@ -1151,7 +1159,7 @@ class FreqtradeBot(LoggingMixin):
        stoploss = (
            self.edge.stoploss(pair=trade.pair)
            if self.edge else
            self.strategy.stoploss / trade.leverage
            trade.stop_loss_pct / trade.leverage
        )
        if trade.is_short:
            stop_price = trade.open_rate * (1 - stoploss)
@ -7,6 +7,8 @@ import logging
import sys
from typing import Any, List

from freqtrade.util.gc_setup import gc_set_threshold


# check min. python version
if sys.version_info < (3, 8):  # pragma: no cover
@ -36,6 +38,7 @@ def main(sysargv: List[str] = None) -> None:
    # Call subcommand.
    if 'func' in args:
        logger.info(f'freqtrade {__version__}')
        gc_set_threshold()
        return_code = args['func'](args)
    else:
        # No subcommand was issued.
@ -301,3 +301,21 @@ def remove_entry_exit_signals(dataframe: pd.DataFrame):
    dataframe[SignalTagType.EXIT_TAG.value] = None

    return dataframe


def append_candles_to_dataframe(left: pd.DataFrame, right: pd.DataFrame) -> pd.DataFrame:
    """
    Append the `right` dataframe to the `left` dataframe
    :param left: The full dataframe you want appended to
    :param right: The new dataframe containing the data you want appended
    :returns: The dataframe with the right data in it
    """
    if left.iloc[-1]['date'] != right.iloc[-1]['date']:
        left = pd.concat([left, right])

    # Only keep the last 1500 candles in memory
    left = left[-1500:] if len(left) > 1500 else left
    left.reset_index(drop=True, inplace=True)

    return left
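A usage sketch for the new helper above, assuming it has been imported from the strategy helpers module; the candle data is invented:

```python
# Sketch: appending a freshly fetched candle to a cached dataframe while the
# helper caps memory at 1500 rows.
import pandas as pd

left = pd.DataFrame({"date": pd.date_range("2022-12-01", periods=3, freq="5min"),
                     "close": [1.0, 1.1, 1.2]})
right = pd.DataFrame({"date": [pd.Timestamp("2022-12-01 00:15:00")],
                      "close": [1.3]})

combined = append_candles_to_dataframe(left, right)  # helper defined above
assert len(combined) == 4  # new candle appended, index reset
```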
@ -692,10 +692,11 @@ class Backtesting:
        trade.orders.append(order)
        return trade

    def _get_exit_trade_entry(self, trade: LocalTrade, row: Tuple) -> Optional[LocalTrade]:
    def _get_exit_trade_entry(
            self, trade: LocalTrade, row: Tuple, is_first: bool) -> Optional[LocalTrade]:
        exit_candle_time: datetime = row[DATE_IDX].to_pydatetime()

        if self.trading_mode == TradingMode.FUTURES:
        if is_first and self.trading_mode == TradingMode.FUTURES:
            trade.funding_fees = self.exchange.calculate_funding_fees(
                self.futures_data[trade.pair],
                amount=trade.amount,
@ -704,32 +705,7 @@ class Backtesting:
                close_date=exit_candle_time,
            )

        if self.timeframe_detail and trade.pair in self.detail_data:
            exit_candle_end = exit_candle_time + timedelta(minutes=self.timeframe_min)

            detail_data = self.detail_data[trade.pair]
            detail_data = detail_data.loc[
                (detail_data['date'] >= exit_candle_time) &
                (detail_data['date'] < exit_candle_end)
            ].copy()
            if len(detail_data) == 0:
                # Fall back to "regular" data if no detail data was found for this candle
                return self._get_exit_trade_entry_for_candle(trade, row)
            detail_data.loc[:, 'enter_long'] = row[LONG_IDX]
            detail_data.loc[:, 'exit_long'] = row[ELONG_IDX]
            detail_data.loc[:, 'enter_short'] = row[SHORT_IDX]
            detail_data.loc[:, 'exit_short'] = row[ESHORT_IDX]
            detail_data.loc[:, 'enter_tag'] = row[ENTER_TAG_IDX]
            detail_data.loc[:, 'exit_tag'] = row[EXIT_TAG_IDX]
            for det_row in detail_data[HEADERS].values.tolist():
                res = self._get_exit_trade_entry_for_candle(trade, det_row)
                if res:
                    return res

            return None

        else:
            return self._get_exit_trade_entry_for_candle(trade, row)
        return self._get_exit_trade_entry_for_candle(trade, row)

    def get_valid_price_and_stake(
        self, pair: str, row: Tuple, propose_rate: float, stake_amount: float,
|
|||
stake_amount=stake_amount,
|
||||
min_stake_amount=min_stake_amount,
|
||||
max_stake_amount=max_stake_amount,
|
||||
trade_amount=trade.stake_amount if trade else None
|
||||
)
|
||||
|
||||
return propose_rate, stake_amount_val, leverage, min_stake_amount
|
||||
|
@ -1074,7 +1051,7 @@ class Backtesting:
    def backtest_loop(
            self, row: Tuple, pair: str, current_time: datetime, end_date: datetime,
            max_open_trades: int, open_trade_count_start: int) -> int:
            max_open_trades: int, open_trade_count_start: int, is_first: bool = True) -> int:
        """
        NOTE: This method is used by Hyperopt at each iteration. Please keep it optimized.
|
|||
# without positionstacking, we can only have one open trade per pair.
|
||||
# max_open_trades must be respected
|
||||
# don't open on the last row
|
||||
# We only open trades on the main candle, not on detail candles
|
||||
trade_dir = self.check_for_trade_entry(row)
|
||||
if (
|
||||
(self._position_stacking or len(LocalTrade.bt_trades_open_pp[pair]) == 0)
|
||||
and is_first
|
||||
and self.trade_slot_available(max_open_trades, open_trade_count_start)
|
||||
and current_time != end_date
|
||||
and trade_dir is not None
|
||||
|
@ -1120,7 +1099,7 @@ class Backtesting:
            # 4. Create exit orders (if any)
            if not trade.open_order_id:
                self._get_exit_trade_entry(trade, row)  # Place exit order if necessary
                self._get_exit_trade_entry(trade, row, is_first)  # Place exit order if necessary

            # 5. Process exit orders.
            order = trade.select_order(trade.exit_side, is_open=True)
@ -1171,7 +1150,6 @@ class Backtesting:
        self.progress.init_step(BacktestState.BACKTEST, int(
            (end_date - start_date) / timedelta(minutes=self.timeframe_min)))

        # Loop timerange and get candle for each pair at that point in time
        while current_time <= end_date:
            open_trade_count_start = LocalTrade.bt_open_open_trade_count
@ -1185,9 +1163,37 @@ class Backtesting:
                row_index += 1
                indexes[pair] = row_index
                self.dataprovider._set_dataframe_max_index(row_index)
                current_detail_time: datetime = row[DATE_IDX].to_pydatetime()
                if self.timeframe_detail and pair in self.detail_data:
                    exit_candle_end = current_detail_time + timedelta(minutes=self.timeframe_min)

                open_trade_count_start = self.backtest_loop(
                    row, pair, current_time, end_date, max_open_trades, open_trade_count_start)
                    detail_data = self.detail_data[pair]
                    detail_data = detail_data.loc[
                        (detail_data['date'] >= current_detail_time) &
                        (detail_data['date'] < exit_candle_end)
                    ].copy()
                    if len(detail_data) == 0:
                        # Fall back to "regular" data if no detail data was found for this candle
                        open_trade_count_start = self.backtest_loop(
                            row, pair, current_time, end_date, max_open_trades,
                            open_trade_count_start)
                    detail_data.loc[:, 'enter_long'] = row[LONG_IDX]
                    detail_data.loc[:, 'exit_long'] = row[ELONG_IDX]
                    detail_data.loc[:, 'enter_short'] = row[SHORT_IDX]
                    detail_data.loc[:, 'exit_short'] = row[ESHORT_IDX]
                    detail_data.loc[:, 'enter_tag'] = row[ENTER_TAG_IDX]
                    detail_data.loc[:, 'exit_tag'] = row[EXIT_TAG_IDX]
                    is_first = True
                    current_time_det = current_time
                    for det_row in detail_data[HEADERS].values.tolist():
                        open_trade_count_start = self.backtest_loop(
                            det_row, pair, current_time_det, end_date, max_open_trades,
                            open_trade_count_start, is_first)
                        current_time_det += timedelta(minutes=self.timeframe_detail_min)
                        is_first = False
                else:
                    open_trade_count_start = self.backtest_loop(
                        row, pair, current_time, end_date, max_open_trades, open_trade_count_start)

            # Move time one configured time_interval ahead.
            self.progress.increment()
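The relocated detail loop carves the lower-timeframe rows belonging to one main candle out of the detail dataframe and walks them with an is_first flag, so entries only happen on the first detail row. The slicing in isolation (timeframes and data are illustrative):

```python
# Sketch: select the 1m rows inside one 5m candle and iterate them.
from datetime import timedelta

import pandas as pd

detail_data = pd.DataFrame(
    {"date": pd.date_range("2022-12-01", periods=10, freq="1min")})
current_time = pd.Timestamp("2022-12-01 00:05:00")
candle_end = current_time + timedelta(minutes=5)

window = detail_data.loc[(detail_data["date"] >= current_time) &
                         (detail_data["date"] < candle_end)].copy()
is_first = True
for _, det_row in window.iterrows():
    # entries are only allowed while is_first is True; exits on every row
    is_first = False
```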
@ -109,11 +109,10 @@ def migrate_trades_and_orders_table(
    else:
        is_short = get_column_def(cols, 'is_short', '0')

    # Margin Properties
    # Futures Properties
    interest_rate = get_column_def(cols, 'interest_rate', '0.0')

    # Futures properties
    funding_fees = get_column_def(cols, 'funding_fees', '0.0')
    max_stake_amount = get_column_def(cols, 'max_stake_amount', 'stake_amount')

    # If ticker-interval existed use that, else null.
    if has_column(cols, 'ticker_interval'):
@ -162,7 +161,8 @@ def migrate_trades_and_orders_table(
        timeframe, open_trade_value, close_profit_abs,
        trading_mode, leverage, liquidation_price, is_short,
        interest_rate, funding_fees, realized_profit,
        amount_precision, price_precision, precision_mode, contract_size
        amount_precision, price_precision, precision_mode, contract_size,
        max_stake_amount
        )
    select id, lower(exchange), pair, {base_currency} base_currency,
    {stake_currency} stake_currency,
@ -190,7 +190,8 @@ def migrate_trades_and_orders_table(
    {is_short} is_short, {interest_rate} interest_rate,
    {funding_fees} funding_fees, {realized_profit} realized_profit,
    {amount_precision} amount_precision, {price_precision} price_precision,
    {precision_mode} precision_mode, {contract_size} contract_size
    {precision_mode} precision_mode, {contract_size} contract_size,
    {max_stake_amount} max_stake_amount
    from {trade_back_name}
    """))
@ -310,8 +311,8 @@ def check_migrate(engine, decl_base, previous_tables) -> None:
    # if ('orders' not in previous_tables
    #     or not has_column(cols_orders, 'funding_fee')):
    migrating = False
    # if not has_column(cols_trades, 'contract_size'):
    if not has_column(cols_orders, 'funding_fee'):
    # if not has_column(cols_orders, 'funding_fee'):
    if not has_column(cols_trades, 'max_stake_amount'):
        migrating = True
        logger.info(f"Running database migration for trades - "
                    f"backup: {table_back_name}, {order_table_bak_name}")
@ -87,7 +87,7 @@ class PairLocks():
        Get the lock that expires the latest for the pair given.
        """
        locks = PairLocks.get_pair_locks(pair, now, side=side)
        locks = sorted(locks, key=lambda l: l.lock_end_time, reverse=True)
        locks = sorted(locks, key=lambda lock: lock.lock_end_time, reverse=True)
        return locks[0] if locks else None

    @staticmethod
@ -293,6 +293,7 @@ class LocalTrade():
    close_profit: Optional[float] = None
    close_profit_abs: Optional[float] = None
    stake_amount: float = 0.0
    max_stake_amount: float = 0.0
    amount: float = 0.0
    amount_requested: Optional[float] = None
    open_date: datetime
@ -397,12 +398,6 @@ class LocalTrade():
    def close_date_utc(self):
        return self.close_date.replace(tzinfo=timezone.utc)

    @property
    def enter_side(self) -> str:
        """ DEPRECATED, please use entry_side instead"""
        # TODO: Please remove me after 2022.5
        return self.entry_side

    @property
    def entry_side(self) -> str:
        if self.is_short:
@ -475,8 +470,8 @@ class LocalTrade():
            'amount': round(self.amount, 8),
            'amount_requested': round(self.amount_requested, 8) if self.amount_requested else None,
            'stake_amount': round(self.stake_amount, 8),
            'max_stake_amount': round(self.max_stake_amount, 8) if self.max_stake_amount else None,
            'strategy': self.strategy,
            'buy_tag': self.enter_tag,
            'enter_tag': self.enter_tag,
            'timeframe': self.timeframe,
@ -513,7 +508,6 @@ class LocalTrade():
            'profit_pct': round(self.close_profit * 100, 2) if self.close_profit else None,
            'profit_abs': self.close_profit_abs,

            'sell_reason': self.exit_reason,  # Deprecated
            'exit_reason': self.exit_reason,
            'exit_order_status': self.exit_order_status,
            'stop_loss_abs': self.stop_loss,
@ -882,6 +876,7 @@ class LocalTrade():
        ZERO = FtPrecise(0.0)
        current_amount = FtPrecise(0.0)
        current_stake = FtPrecise(0.0)
        max_stake_amount = FtPrecise(0.0)
        total_stake = 0.0  # Total stake after all buy orders (does not subtract!)
        avg_price = FtPrecise(0.0)
        close_profit = 0.0
@ -923,7 +918,9 @@ class LocalTrade():
                    exit_rate, amount=exit_amount, open_rate=avg_price)
            else:
                total_stake = total_stake + self._calc_open_trade_value(tmp_amount, price)
                max_stake_amount += (tmp_amount * price)
        self.funding_fees = funding_fees
        self.max_stake_amount = float(max_stake_amount)

        if close_profit:
            self.close_profit = close_profit
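The max_stake_amount bookkeeping above accumulates entry-order value; the real code uses FtPrecise for exact arithmetic, but it reduces to this sketch with invented orders:

```python
# Sketch: peak stake is the sum of entry-order value (amount * price).
entry_orders = [(0.5, 20000.0), (0.25, 19000.0)]  # (amount, price) pairs

max_stake_amount = 0.0
for amount, price in entry_orders:
    max_stake_amount += amount * price

print(max_stake_amount)  # 14750.0 with the sample orders
```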
@ -1175,6 +1172,7 @@ class Trade(_DECL_BASE, LocalTrade):
    close_profit = Column(Float)
    close_profit_abs = Column(Float)
    stake_amount = Column(Float, nullable=False)
    max_stake_amount = Column(Float)
    amount = Column(Float)
    amount_requested = Column(Float)
    open_date = Column(DateTime, nullable=False, default=datetime.utcnow)
206
freqtrade/plugins/pairlist/RemotePairList.py
Normal file
@ -0,0 +1,206 @@
"""
Remote PairList provider

Provides pair list fetched from a remote source
"""
import json
import logging
from pathlib import Path
from typing import Any, Dict, List, Tuple

import requests
from cachetools import TTLCache

from freqtrade import __version__
from freqtrade.constants import Config
from freqtrade.exceptions import OperationalException
from freqtrade.exchange.types import Tickers
from freqtrade.plugins.pairlist.IPairList import IPairList


logger = logging.getLogger(__name__)


class RemotePairList(IPairList):

    def __init__(self, exchange, pairlistmanager,
                 config: Config, pairlistconfig: Dict[str, Any],
                 pairlist_pos: int) -> None:
        super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos)

        if 'number_assets' not in self._pairlistconfig:
            raise OperationalException(
                '`number_assets` not specified. Please check your configuration '
                'for "pairlist.config.number_assets"')

        if 'pairlist_url' not in self._pairlistconfig:
            raise OperationalException(
                '`pairlist_url` not specified. Please check your configuration '
                'for "pairlist.config.pairlist_url"')

        self._number_pairs = self._pairlistconfig['number_assets']
        self._refresh_period: int = self._pairlistconfig.get('refresh_period', 1800)
        self._keep_pairlist_on_failure = self._pairlistconfig.get('keep_pairlist_on_failure', True)
        self._pair_cache: TTLCache = TTLCache(maxsize=1, ttl=self._refresh_period)
        self._pairlist_url = self._pairlistconfig.get('pairlist_url', '')
        self._read_timeout = self._pairlistconfig.get('read_timeout', 60)
        self._bearer_token = self._pairlistconfig.get('bearer_token', '')
        self._init_done = False
        self._last_pairlist: List[Any] = list()

    @property
    def needstickers(self) -> bool:
        """
        Boolean property defining if tickers are necessary.
        If no Pairlist requires tickers, an empty Dict is passed
        as tickers argument to filter_pairlist
        """
        return False

    def short_desc(self) -> str:
        """
        Short whitelist method description - used for startup-messages
        """
        return f"{self.name} - {self._pairlistconfig['number_assets']} pairs from RemotePairlist."

    def process_json(self, jsonparse) -> List[str]:

        pairlist = jsonparse.get('pairs', [])
        remote_refresh_period = int(jsonparse.get('refresh_period', self._refresh_period))

        if self._refresh_period < remote_refresh_period:
            self.log_once(f'Refresh Period has been increased from {self._refresh_period}'
                          f' to minimum allowed: {remote_refresh_period} from Remote.',
                          logger.info)

            self._refresh_period = remote_refresh_period
            self._pair_cache = TTLCache(maxsize=1, ttl=remote_refresh_period)

        self._init_done = True

        return pairlist

    def return_last_pairlist(self) -> List[str]:
        if self._keep_pairlist_on_failure:
            pairlist = self._last_pairlist
            self.log_once('Keeping last fetched pairlist', logger.info)
        else:
            pairlist = []

        return pairlist

    def fetch_pairlist(self) -> Tuple[List[str], float]:

        headers = {
            'User-Agent': 'Freqtrade/' + __version__ + ' Remotepairlist'
        }

        if self._bearer_token:
            headers['Authorization'] = f'Bearer {self._bearer_token}'

        try:
            response = requests.get(self._pairlist_url, headers=headers,
                                    timeout=self._read_timeout)
            content_type = response.headers.get('content-type')
            time_elapsed = response.elapsed.total_seconds()

            if "application/json" in str(content_type):
                jsonparse = response.json()

                try:
                    pairlist = self.process_json(jsonparse)
                except Exception as e:
                    if self._init_done:
                        pairlist = self.return_last_pairlist()
                        logger.warning(f'Error while processing JSON data: {type(e)}')
                    else:
                        raise OperationalException(f'Error while processing JSON data: {type(e)}')

            else:
                if self._init_done:
                    self.log_once(f'Error: RemotePairList is not of type JSON: '
                                  f' {self._pairlist_url}', logger.info)
                    pairlist = self.return_last_pairlist()
                else:
                    raise OperationalException('RemotePairList is not of type JSON, abort.')

        except requests.exceptions.RequestException:
            self.log_once(f'Was not able to fetch pairlist from:'
                          f' {self._pairlist_url}', logger.info)

            pairlist = self.return_last_pairlist()

            time_elapsed = 0

        return pairlist, time_elapsed
def gen_pairlist(self, tickers: Tickers) -> List[str]:
|
||||
"""
|
||||
Generate the pairlist
|
||||
:param tickers: Tickers (from exchange.get_tickers). May be cached.
|
||||
:return: List of pairs
|
||||
"""
|
||||
|
||||
if self._init_done:
|
||||
pairlist = self._pair_cache.get('pairlist')
|
||||
else:
|
||||
pairlist = []
|
||||
|
||||
time_elapsed = 0.0
|
||||
|
||||
if pairlist:
|
||||
# Item found - no refresh necessary
|
||||
return pairlist.copy()
|
||||
else:
|
||||
if self._pairlist_url.startswith("file:///"):
|
||||
filename = self._pairlist_url.split("file:///", 1)[1]
|
||||
file_path = Path(filename)
|
||||
|
||||
if file_path.exists():
|
||||
with open(filename) as json_file:
|
||||
# Load the JSON data into a dictionary
|
||||
jsonparse = json.load(json_file)
|
||||
|
||||
try:
|
||||
pairlist = self.process_json(jsonparse)
|
||||
except Exception as e:
|
||||
if self._init_done:
|
||||
pairlist = self.return_last_pairlist()
|
||||
logger.warning(f'Error while processing JSON data: {type(e)}')
|
||||
else:
|
||||
raise OperationalException('Error while processing'
|
||||
f'JSON data: {type(e)}')
|
||||
else:
|
||||
raise ValueError(f"{self._pairlist_url} does not exist.")
|
||||
else:
|
||||
# Fetch Pairlist from Remote URL
|
||||
pairlist, time_elapsed = self.fetch_pairlist()
|
||||
|
||||
self.log_once(f"Fetched pairs: {pairlist}", logger.debug)
|
||||
|
||||
pairlist = self._whitelist_for_active_markets(pairlist)
|
||||
pairlist = pairlist[:self._number_pairs]
|
||||
|
||||
self._pair_cache['pairlist'] = pairlist.copy()
|
||||
|
||||
if time_elapsed != 0.0:
|
||||
self.log_once(f'Pairlist Fetched in {time_elapsed} seconds.', logger.info)
|
||||
else:
|
||||
self.log_once('Fetched Pairlist.', logger.info)
|
||||
|
||||
self._last_pairlist = list(pairlist)
|
||||
|
||||
return pairlist
|
||||
|
||||
def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:
|
||||
"""
|
||||
Filters and sorts pairlist and returns the whitelist again.
|
||||
Called on each bot iteration - please use internal caching if necessary
|
||||
:param pairlist: pairlist to filter or sort
|
||||
:param tickers: Tickers (from exchange.get_tickers). May be cached.
|
||||
:return: new whitelist
|
||||
"""
|
||||
rpl_pairlist = self.gen_pairlist(tickers)
|
||||
merged_list = pairlist + rpl_pairlist
|
||||
merged_list = sorted(set(merged_list), key=merged_list.index)
|
||||
return merged_list
|
|
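For orientation, the keys read in `__init__` above map directly onto one entry of the `pairlists` configuration. A minimal sketch follows; the URL and numbers are placeholders, not a real endpoint, and the remote payload shape is the one `process_json` expects.

# Hypothetical freqtrade config excerpt (shown as a Python dict) wiring up
# RemotePairList. Keys mirror what __init__ above reads.
pairlists = [
    {
        "method": "RemotePairList",
        "pairlist_url": "https://example.com/pairlist.json",  # or "file:///path/to/pairlist.json"
        "number_assets": 20,               # required; fetched list is truncated to this
        "refresh_period": 1800,            # cache TTL; the remote may raise it via its own refresh_period
        "keep_pairlist_on_failure": True,  # fall back to the last good list on fetch errors
        "read_timeout": 60,
        "bearer_token": "",                # sent as "Authorization: Bearer ..." when non-empty
    }
]
# The remote is expected to answer with JSON shaped like:
# {"pairs": ["BTC/USDT", "ETH/USDT"], "refresh_period": 1800}
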
@@ -135,7 +135,7 @@ class VolumePairList(IPairList):
            filtered_tickers = [
                v for k, v in tickers.items()
                if (self._exchange.get_pair_quote_currency(k) == self._stake_currency
                    and (self._use_range or v[self._sort_key] is not None)
                    and (self._use_range or v.get(self._sort_key) is not None)
                    and v['symbol'] in _pairlist)]
            pairlist = [s['symbol'] for s in filtered_tickers]
        else:

@@ -218,7 +218,7 @@ class VolumePairList(IPairList):
                else:
                    filtered_tickers[i]['quoteVolume'] = 0
            else:
                # Tickers mode - filter based on incomming pairlist.
                # Tickers mode - filter based on incoming pairlist.
                filtered_tickers = [v for k, v in tickers.items() if k in pairlist]

        if self._min_value > 0:

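The one-character change in the first hunk swaps indexing for `dict.get`, so a ticker that lacks the sort key is simply filtered out instead of raising. A minimal, self-contained sketch with made-up ticker data:

# Why .get matters here; tickers and sort_key are illustrative only.
sort_key = "quoteVolume"
tickers = {
    "BTC/USDT": {"symbol": "BTC/USDT", "quoteVolume": 1_000_000},
    "ODD/USDT": {"symbol": "ODD/USDT"},  # exchange returned no volume field
}

# Old form: raises KeyError on the incomplete ticker.
# [v for v in tickers.values() if v[sort_key] is not None]

# New form: the incomplete ticker just fails the filter.
kept = [v for v in tickers.values() if v.get(sort_key) is not None]
assert [v["symbol"] for v in kept] == ["BTC/USDT"]
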
@@ -81,8 +81,6 @@ async def validate_ws_token(
    except HTTPException:
        pass

    # No checks passed, deny the connection
    logger.debug("Denying websocket request.")
    # If it doesn't match, close the websocket connection
    await ws.close(code=status.WS_1008_POLICY_VIOLATION)

@@ -11,6 +11,7 @@ from freqtrade.configuration.config_validation import validate_config_consistenc
from freqtrade.data.btanalysis import get_backtest_resultlist, load_and_merge_backtest_result
from freqtrade.enums import BacktestState
from freqtrade.exceptions import DependencyException
from freqtrade.misc import deep_merge_dicts
from freqtrade.rpc.api_server.api_schemas import (BacktestHistoryEntry, BacktestRequest,
                                                  BacktestResponse)
from freqtrade.rpc.api_server.deps import get_config, is_webserver_mode

@@ -37,10 +38,11 @@ async def api_start_backtest(bt_settings: BacktestRequest, background_tasks: Bac

    btconfig = deepcopy(config)
    settings = dict(bt_settings)
    if settings.get('freqai', None) is not None:
        settings['freqai'] = dict(settings['freqai'])
    # Pydantic models will contain all keys, but non-provided ones are None
    for setting in settings.keys():
        if settings[setting] is not None:
            btconfig[setting] = settings[setting]

    btconfig = deep_merge_dicts(settings, btconfig, allow_null_overrides=False)
    try:
        btconfig['stake_amount'] = float(btconfig['stake_amount'])
    except ValueError:

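The hunk above replaces the manual copy-loop with `deep_merge_dicts(..., allow_null_overrides=False)`: request settings are merged into the backtest config, but `None` (unset) fields no longer overwrite existing values. The sketch below illustrates those semantics with a toy stand-in; it is not the real `freqtrade.misc.deep_merge_dicts` implementation.

# Toy stand-in for deep_merge_dicts, only to illustrate the intended
# behavior: values from `source` win, except that None never overwrites
# when allow_null_overrides is False.
def merge(source, destination, allow_null_overrides=True):
    for key, value in source.items():
        if isinstance(value, dict):
            merge(value, destination.setdefault(key, {}), allow_null_overrides)
        elif value is not None or allow_null_overrides:
            destination[key] = value
    return destination

btconfig = {"stake_amount": "unlimited", "timeframe": "5m"}
settings = {"stake_amount": "100", "timeframe": None}  # timeframe not provided
merge(settings, btconfig, allow_null_overrides=False)
assert btconfig == {"stake_amount": "100", "timeframe": "5m"}
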
@@ -217,8 +217,8 @@ class TradeSchema(BaseModel):
    amount: float
    amount_requested: float
    stake_amount: float
    max_stake_amount: Optional[float]
    strategy: str
    buy_tag: Optional[str]  # Deprecated
    enter_tag: Optional[str]
    timeframe: int
    fee_open: Optional[float]

@@ -243,7 +243,6 @@ class TradeSchema(BaseModel):
    profit_pct: Optional[float]
    profit_abs: Optional[float]
    profit_fiat: Optional[float]
    sell_reason: Optional[str]  # Deprecated
    exit_reason: Optional[str]
    exit_order_status: Optional[str]
    stop_loss_abs: Optional[float]

@@ -372,6 +371,10 @@ class StrategyListResponse(BaseModel):
    strategies: List[str]


class FreqAIModelListResponse(BaseModel):
    freqaimodels: List[str]


class StrategyResponse(BaseModel):
    strategy: str
    code: str

@@ -410,6 +413,10 @@ class PairHistory(BaseModel):
    }


class BacktestFreqAIInputs(BaseModel):
    identifier: str


class BacktestRequest(BaseModel):
    strategy: str
    timeframe: Optional[str]

@@ -419,6 +426,9 @@ class BacktestRequest(BaseModel):
    stake_amount: Optional[str]
    enable_protections: bool
    dry_run_wallet: Optional[float]
    backtest_cache: Optional[str]
    freqaimodel: Optional[str]
    freqai: Optional[BacktestFreqAIInputs]


class BacktestResponse(BaseModel):

@@ -13,12 +13,13 @@ from freqtrade.rpc import RPC
from freqtrade.rpc.api_server.api_schemas import (AvailablePairs, Balances, BlacklistPayload,
                                                  BlacklistResponse, Count, Daily,
                                                  DeleteLockRequest, DeleteTrade, ForceEnterPayload,
                                                  ForceEnterResponse, ForceExitPayload, Health,
                                                  Locks, Logs, OpenTradeSchema, PairHistory,
                                                  PerformanceEntry, Ping, PlotConfig, Profit,
                                                  ResultMsg, ShowConfig, Stats, StatusMsg,
                                                  StrategyListResponse, StrategyResponse, SysInfo,
                                                  Version, WhitelistResponse)
                                                  ForceEnterResponse, ForceExitPayload,
                                                  FreqAIModelListResponse, Health, Locks, Logs,
                                                  OpenTradeSchema, PairHistory, PerformanceEntry,
                                                  Ping, PlotConfig, Profit, ResultMsg, ShowConfig,
                                                  Stats, StatusMsg, StrategyListResponse,
                                                  StrategyResponse, SysInfo, Version,
                                                  WhitelistResponse)
from freqtrade.rpc.api_server.deps import get_config, get_exchange, get_rpc, get_rpc_optional
from freqtrade.rpc.rpc import RPCException


@@ -37,7 +38,9 @@ logger = logging.getLogger(__name__)
# 2.16: Additional daily metrics
# 2.17: Forceentry - leverage, partial force_exit
# 2.20: Add websocket endpoints
API_VERSION = 2.20
# 2.21: Add new_candle messagetype
# 2.22: Add FreqAI to backtesting
API_VERSION = 2.22

# Public API, requires no auth.
router_public = APIRouter()

@@ -278,6 +281,16 @@ def get_strategy(strategy: str, config=Depends(get_config)):
    }


@router.get('/freqaimodels', response_model=FreqAIModelListResponse, tags=['freqai'])
def list_freqaimodels(config=Depends(get_config)):
    from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver
    strategies = FreqaiModelResolver.search_all_objects(
        config, False)
    strategies = sorted(strategies, key=lambda x: x['name'])

    return {'freqaimodels': [x['name'] for x in strategies]}


@router.get('/available_pairs', response_model=AvailablePairs, tags=['candle data'])
def list_available_pairs(timeframe: Optional[str] = None, stake_currency: Optional[str] = None,
                         candletype: Optional[CandleType] = None, config=Depends(get_config)):

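The new `/freqaimodels` endpoint returns the installed FreqAI model names. A minimal client sketch follows; the host, port and credentials are assumptions matching a typical local `api_server` configuration, not values from this diff.

import requests

# Hypothetical local setup; adjust host/port/credentials to your api_server config.
resp = requests.get(
    "http://127.0.0.1:8080/api/v1/freqaimodels",
    auth=("freqtrader", "SuperSecret"),
)
resp.raise_for_status()
print(resp.json())  # e.g. {"freqaimodels": ["LightGBMRegressor", ...]}
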
@@ -1,16 +1,16 @@
import logging
import time
from typing import Any, Dict

from fastapi import APIRouter, Depends, WebSocketDisconnect
from fastapi.websockets import WebSocket, WebSocketState
from fastapi import APIRouter, Depends
from fastapi.websockets import WebSocket
from pydantic import ValidationError
from websockets.exceptions import WebSocketException

from freqtrade.enums import RPCMessageType, RPCRequestType
from freqtrade.rpc.api_server.api_auth import validate_ws_token
from freqtrade.rpc.api_server.deps import get_channel_manager, get_rpc
from freqtrade.rpc.api_server.ws import WebSocketChannel
from freqtrade.rpc.api_server.ws.channel import ChannelManager
from freqtrade.rpc.api_server.deps import get_message_stream, get_rpc
from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel
from freqtrade.rpc.api_server.ws.message_stream import MessageStream
from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSMessageSchema,
                                                 WSRequestSchema, WSWhitelistMessage)
from freqtrade.rpc.rpc import RPC

@@ -22,23 +22,35 @@ logger = logging.getLogger(__name__)
router = APIRouter()


async def is_websocket_alive(ws: WebSocket) -> bool:
async def channel_reader(channel: WebSocketChannel, rpc: RPC):
    """
    Check if a FastAPI Websocket is still open
    Iterate over the messages from the channel and process the request
    """
    if (
        ws.application_state == WebSocketState.CONNECTED and
        ws.client_state == WebSocketState.CONNECTED
    ):
        return True
    return False
    async for message in channel:
        await _process_consumer_request(message, channel, rpc)


async def channel_broadcaster(channel: WebSocketChannel, message_stream: MessageStream):
    """
    Iterate over messages in the message stream and send them
    """
    async for message, ts in message_stream:
        if channel.subscribed_to(message.get('type')):
            # Log a warning if this channel is behind
            # on the message stream by a lot
            if (time.time() - ts) > 60:
                logger.warning(f"Channel {channel} is behind MessageStream by 1 minute,"
                               " this can cause a memory leak if you see this message"
                               " often, consider reducing pair list size or amount of"
                               " consumers.")

            await channel.send(message, timeout=True)


async def _process_consumer_request(
    request: Dict[str, Any],
    channel: WebSocketChannel,
    rpc: RPC,
    channel_manager: ChannelManager
    rpc: RPC
):
    """
    Validate and handle a request from a websocket consumer

@@ -74,65 +86,30 @@ async def _process_consumer_request(

        # Format response
        response = WSWhitelistMessage(data=whitelist)
        # Send it back
        await channel_manager.send_direct(channel, response.dict(exclude_none=True))
        await channel.send(response.dict(exclude_none=True))

    elif type == RPCRequestType.ANALYZED_DF:
        limit = None

        if data:
            # Limit the amount of candles per dataframe to 'limit' or 1500
            limit = max(data.get('limit', 1500), 1500)
        # Limit the amount of candles per dataframe to 'limit' or 1500
        limit = min(data.get('limit', 1500), 1500) if data else None
        pair = data.get('pair', None) if data else None

        # For every pair in the generator, send a separate message
        for message in rpc._ws_request_analyzed_df(limit):
        for message in rpc._ws_request_analyzed_df(limit, pair):
            # Format response
            response = WSAnalyzedDFMessage(data=message)
            await channel_manager.send_direct(channel, response.dict(exclude_none=True))
            await channel.send(response.dict(exclude_none=True))


@router.websocket("/message/ws")
async def message_endpoint(
    ws: WebSocket,
    websocket: WebSocket,
    token: str = Depends(validate_ws_token),
    rpc: RPC = Depends(get_rpc),
    channel_manager=Depends(get_channel_manager),
    token: str = Depends(validate_ws_token)
    message_stream: MessageStream = Depends(get_message_stream)
):
    """
    Message WebSocket endpoint, facilitates sending RPC messages
    """
    try:
        channel = await channel_manager.on_connect(ws)
        if await is_websocket_alive(ws):

            logger.info(f"Consumer connected - {channel}")

            # Keep connection open until explicitly closed, and process requests
            try:
                while not channel.is_closed():
                    request = await channel.recv()

                    # Process the request here
                    await _process_consumer_request(request, channel, rpc, channel_manager)

            except (WebSocketDisconnect, WebSocketException):
                # Handle client disconnects
                logger.info(f"Consumer disconnected - {channel}")
            except RuntimeError:
                # Handle cases like -
                # RuntimeError('Cannot call "send" once a closed message has been sent')
                pass
            except Exception as e:
                logger.info(f"Consumer connection failed - {channel}: {e}")
                logger.debug(e, exc_info=e)

    except RuntimeError:
        # WebSocket was closed
        # Do nothing
        pass
    except Exception as e:
        logger.error(f"Failed to serve - {ws.client}")
        # Log tracebacks to keep track of what errors are happening
        logger.exception(e)
    finally:
        if channel:
            await channel_manager.on_disconnect(ws)
    if token:
        async with create_channel(websocket) as channel:
            await channel.run_channel_tasks(
                channel_reader(channel, rpc),
                channel_broadcaster(channel, message_stream)
            )

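With the endpoint above, an external consumer connects to `/api/v1/message/ws` with its token and speaks the small request protocol handled by `_process_consumer_request`. A hedged minimal client: the `websockets` client library, the placeholder host/port/token, and the lowercase string values of `RPCRequestType` are my assumptions.

import asyncio
import json

import websockets  # third-party client library, assumed available


async def main():
    # Placeholder URL; use your api_server host, port and ws_token.
    uri = "ws://127.0.0.1:8080/api/v1/message/ws?token=mysecret"
    async with websockets.connect(uri) as ws:
        # Ask for the current whitelist (RPCRequestType.WHITELIST).
        await ws.send(json.dumps({"type": "whitelist", "data": None}))
        print(json.loads(await ws.recv()))


asyncio.run(main())
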
@@ -41,8 +41,8 @@ def get_exchange(config=Depends(get_config)):
    return ApiServer._exchange


def get_channel_manager():
    return ApiServer._ws_channel_manager
def get_message_stream():
    return ApiServer._message_stream


def is_webserver_mode(config=Depends(get_config)):

@@ -1,22 +1,17 @@
import asyncio
import logging
from ipaddress import IPv4Address
from threading import Thread
from typing import Any, Dict, Optional

import orjson
import uvicorn
from fastapi import Depends, FastAPI
from fastapi.middleware.cors import CORSMiddleware
# Look into alternatives
from janus import Queue as ThreadedQueue
from starlette.responses import JSONResponse

from freqtrade.constants import Config
from freqtrade.exceptions import OperationalException
from freqtrade.rpc.api_server.uvicorn_threaded import UvicornServer
from freqtrade.rpc.api_server.ws import ChannelManager
from freqtrade.rpc.api_server.ws_schemas import WSMessageSchemaType
from freqtrade.rpc.api_server.ws.message_stream import MessageStream
from freqtrade.rpc.rpc import RPC, RPCException, RPCHandler


@@ -50,10 +45,8 @@ class ApiServer(RPCHandler):
    _config: Config = {}
    # Exchange - only available in webserver mode.
    _exchange = None
    # websocket message queue stuff
    _ws_channel_manager: ChannelManager
    _ws_thread = None
    _ws_loop: Optional[asyncio.AbstractEventLoop] = None
    # websocket message stuff
    _message_stream: Optional[MessageStream] = None

    def __new__(cls, *args, **kwargs):
        """

@@ -71,15 +64,11 @@ class ApiServer(RPCHandler):
            return
        self._standalone: bool = standalone
        self._server = None
        self._ws_queue: Optional[ThreadedQueue] = None
        self._ws_background_task = None

        ApiServer.__initialized = True

        api_config = self._config['api_server']

        ApiServer._ws_channel_manager = ChannelManager()

        self.app = FastAPI(title="Freqtrade API",
                           docs_url='/docs' if api_config.get('enable_openapi', False) else None,
                           redoc_url=None,

@@ -105,21 +94,9 @@ class ApiServer(RPCHandler):
        del ApiServer._rpc
        if self._server and not self._standalone:
            logger.info("Stopping API Server")
            # self._server.force_exit, self._server.should_exit = True, True
            self._server.cleanup()

        if self._ws_thread and self._ws_loop:
            logger.info("Stopping API Server background tasks")

            if self._ws_background_task:
                # Cancel the queue task
                self._ws_background_task.cancel()

            self._ws_thread.join()

        self._ws_thread = None
        self._ws_loop = None
        self._ws_background_task = None

    @classmethod
    def shutdown(cls):
        cls.__initialized = False

@@ -129,9 +106,11 @@ class ApiServer(RPCHandler):
        cls._rpc = None

    def send_msg(self, msg: Dict[str, Any]) -> None:
        if self._ws_queue:
            sync_q = self._ws_queue.sync_q
            sync_q.put(msg)
        """
        Publish the message to the message stream
        """
        if ApiServer._message_stream:
            ApiServer._message_stream.publish(msg)

    def handle_rpc_exception(self, request, exc):
        logger.exception(f"API Error calling: {exc}")

@@ -170,54 +149,30 @@ class ApiServer(RPCHandler):
        )

        app.add_exception_handler(RPCException, self.handle_rpc_exception)
        app.add_event_handler(
            event_type="startup",
            func=self._api_startup_event
        )
        app.add_event_handler(
            event_type="shutdown",
            func=self._api_shutdown_event
        )

    def start_message_queue(self):
        if self._ws_thread:
            return
    async def _api_startup_event(self):
        """
        Creates the MessageStream class on startup
        so it has access to the same event loop
        as uvicorn
        """
        if not ApiServer._message_stream:
            ApiServer._message_stream = MessageStream()

        # Create a new loop, as it'll be just for the background thread
        self._ws_loop = asyncio.new_event_loop()

        # Start the thread
        self._ws_thread = Thread(target=self._ws_loop.run_forever)
        self._ws_thread.start()

        # Finally, submit the coro to the thread
        self._ws_background_task = asyncio.run_coroutine_threadsafe(
            self._broadcast_queue_data(), loop=self._ws_loop)

    async def _broadcast_queue_data(self) -> None:
        # Instantiate the queue in this coroutine so it's attached to our loop
        self._ws_queue = ThreadedQueue()
        async_queue = self._ws_queue.async_q

        try:
            while True:
                logger.debug("Getting queue messages...")
                if (qsize := async_queue.qsize()) > 20:
                    # If the queue becomes too big for too long, this may indicate a problem.
                    logger.warning(f"Queue size now {qsize}")
                # Get data from queue
                message: WSMessageSchemaType = await async_queue.get()
                logger.debug(f"Found message of type: {message.get('type')}")
                async_queue.task_done()
                # Broadcast it
                await self._ws_channel_manager.broadcast(message)
        except asyncio.CancelledError:
            pass

        # For testing, shouldn't happen when stable
        except Exception as e:
            logger.exception(f"Exception happened in background task: {e}")

        finally:
            # Disconnect channels and stop the loop on cancel
            await self._ws_channel_manager.disconnect_all()
            if self._ws_loop:
                self._ws_loop.stop()
            # Avoid adding more items to the queue if they aren't
            # going to get broadcasted.
            self._ws_queue = None
    async def _api_shutdown_event(self):
        """
        Removes the MessageStream class on shutdown
        """
        if ApiServer._message_stream:
            ApiServer._message_stream = None

    def start_api(self):
        """

@@ -257,7 +212,6 @@ class ApiServer(RPCHandler):
            if self._standalone:
                self._server.run()
            else:
                self.start_message_queue()
                self._server.run_in_thread()
        except Exception:
            logger.exception("Api server failed to start.")

@@ -3,4 +3,5 @@
from freqtrade.rpc.api_server.ws.types import WebSocketType
from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy
from freqtrade.rpc.api_server.ws.serializer import HybridJSONWebSocketSerializer
from freqtrade.rpc.api_server.ws.channel import ChannelManager, WebSocketChannel
from freqtrade.rpc.api_server.ws.channel import WebSocketChannel
from freqtrade.rpc.api_server.ws.message_stream import MessageStream

@@ -1,11 +1,13 @@
import asyncio
import logging
import time
from threading import RLock
from typing import Any, Dict, List, Optional, Type, Union
from collections import deque
from contextlib import asynccontextmanager
from typing import Any, AsyncIterator, Deque, Dict, List, Optional, Type, Union
from uuid import uuid4

from fastapi import WebSocket as FastAPIWebSocket
from fastapi import WebSocketDisconnect
from websockets.exceptions import ConnectionClosed

from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy
from freqtrade.rpc.api_server.ws.serializer import (HybridJSONWebSocketSerializer,

@@ -21,31 +23,29 @@ class WebSocketChannel:
    """
    Object to help facilitate managing a websocket connection
    """

    def __init__(
        self,
        websocket: WebSocketType,
        channel_id: Optional[str] = None,
        drain_timeout: int = 3,
        throttle: float = 0.01,
        serializer_cls: Type[WebSocketSerializer] = HybridJSONWebSocketSerializer
        serializer_cls: Type[WebSocketSerializer] = HybridJSONWebSocketSerializer,
        send_throttle: float = 0.01
    ):

        self.channel_id = channel_id if channel_id else uuid4().hex[:8]

        # The WebSocket object
        self._websocket = WebSocketProxy(websocket)

        self.drain_timeout = drain_timeout
        self.throttle = throttle

        self._subscriptions: List[str] = []
        # 32 is the size of the receiving queue in websockets package
        self.queue: asyncio.Queue[Dict[str, Any]] = asyncio.Queue(maxsize=32)
        self._relay_task = asyncio.create_task(self.relay())

        # Internal event to signify a closed websocket
        self._closed = asyncio.Event()
        # The async tasks created for the channel
        self._channel_tasks: List[asyncio.Task] = []

        # Deque for average send times
        self._send_times: Deque[float] = deque([], maxlen=10)
        # High limit defaults to 3 to start
        self._send_high_limit = 3
        self._send_throttle = send_throttle

        # The subscribed message types
        self._subscriptions: List[str] = []

        # Wrap the WebSocket in the Serializing class
        self._wrapped_ws = serializer_cls(self._websocket)

@@ -61,43 +61,59 @@ class WebSocketChannel:
    def remote_addr(self):
        return self._websocket.remote_addr

    async def _send(self, data):
        """
        Send data on the wrapped websocket
        """
        await self._wrapped_ws.send(data)
    @property
    def avg_send_time(self):
        return sum(self._send_times) / len(self._send_times)

    async def send(self, data) -> bool:
    def _calc_send_limit(self):
        """
        Add the data to the queue to be sent.
        :returns: True if data added to queue, False otherwise
        Calculate the send high limit for this channel
        """

        # This block only runs if the queue is full, it will wait
        # until self.drain_timeout for the relay to drain the outgoing queue
        # We can't use asyncio.wait_for here because the queue may have been created with a
        # different eventloop
        if not self.is_closed():
            start = time.time()
            while self.queue.full():
                await asyncio.sleep(1)
                if (time.time() - start) > self.drain_timeout:
                    return False
        # Only update if we have enough data
        if len(self._send_times) == self._send_times.maxlen:
            # At least 1s or twice the average of send times, with a
            # maximum of 3 seconds per message
            self._send_high_limit = min(max(self.avg_send_time * 2, 1), 3)

            # If for some reason the queue is still full, just return False
            try:
                self.queue.put_nowait(data)
            except asyncio.QueueFull:
                return False
    async def send(
        self,
        message: Union[WSMessageSchemaType, Dict[str, Any]],
        timeout: bool = False
    ):
        """
        Send a message on the wrapped websocket. If the sending
        takes too long, it will raise a TimeoutError and
        disconnect the connection.

            # If we got here everything is ok
            return True
        else:
            return False
        :param message: The message to send
        :param timeout: Enforce send high limit, defaults to False
        """
        try:
            _ = time.time()
            # If the send times out, it will raise
            # a TimeoutError and bubble up to the
            # message_endpoint to close the connection
            await asyncio.wait_for(
                self._wrapped_ws.send(message),
                timeout=self._send_high_limit if timeout else None
            )
            total_time = time.time() - _
            self._send_times.append(total_time)

            self._calc_send_limit()
        except asyncio.TimeoutError:
            logger.info(f"Connection for {self} timed out, disconnecting")
            raise

        # Explicitly give control back to event loop as
        # websockets.send does not
        # Also throttles how fast we send
        await asyncio.sleep(self._send_throttle)

    async def recv(self):
        """
        Receive data on the wrapped websocket
        Receive a message on the wrapped websocket
        """
        return await self._wrapped_ws.recv()

@@ -107,17 +123,27 @@ class WebSocketChannel:
        """
        return await self._websocket.ping()

    async def accept(self):
        """
        Accept the underlying websocket connection,
        if the connection has been closed before we can
        accept, just close the channel.
        """
        try:
            return await self._websocket.accept()
        except RuntimeError:
            await self.close()

    async def close(self):
        """
        Close the WebSocketChannel
        """

        self._closed.set()
        self._relay_task.cancel()

        try:
            await self.raw_websocket.close()
        except Exception:
            await self._websocket.close()
        except RuntimeError:
            pass

    def is_closed(self) -> bool:

@@ -142,99 +168,76 @@ class WebSocketChannel:
        """
        return message_type in self._subscriptions

    async def relay(self):
    async def run_channel_tasks(self, *tasks, **kwargs):
        """
        Relay messages from the channel's queue and send them out. This is started
        as a task.
        Create and await on the channel tasks unless an exception
        was raised, then cancel them all.

        :params *tasks: All coros or tasks to be run concurrently
        :param **kwargs: Any extra kwargs to pass to gather
        """
        while not self._closed.is_set():
            message = await self.queue.get()

        if not self.is_closed():
            # Wrap the coros into tasks if they aren't already
            self._channel_tasks = [
                task if isinstance(task, asyncio.Task) else asyncio.create_task(task)
                for task in tasks
            ]

            try:
                await self._send(message)
                self.queue.task_done()
                return await asyncio.gather(*self._channel_tasks, **kwargs)
            except Exception:
                # If an exception occurred, cancel the rest of the tasks
                await self.cancel_channel_tasks()

                # Limit messages per sec.
                # Could cause problems with queue size if too low, and
                # problems with network traffik if too high.
                # 0.01 = 100/s
                await asyncio.sleep(self.throttle)
            except RuntimeError:
                # The connection was closed, just exit the task
                return


class ChannelManager:
    def __init__(self):
        self.channels = dict()
        self._lock = RLock()  # Re-entrant Lock

    async def on_connect(self, websocket: WebSocketType):
    async def cancel_channel_tasks(self):
        """
        Wrap websocket connection into Channel and add to list

        :param websocket: The WebSocket object to attach to the Channel
        Cancel and wait on all channel tasks
        """
        if isinstance(websocket, FastAPIWebSocket):
        for task in self._channel_tasks:
            task.cancel()

            # Wait for tasks to finish cancelling
            try:
                await websocket.accept()
            except RuntimeError:
                # The connection was closed before we could accept it
                return
                await task
            except (
                asyncio.CancelledError,
                asyncio.TimeoutError,
                WebSocketDisconnect,
                ConnectionClosed,
                RuntimeError
            ):
                pass
            except Exception as e:
                logger.info(f"Encountered unknown exception: {e}", exc_info=e)

        ws_channel = WebSocketChannel(websocket)
        self._channel_tasks = []

        with self._lock:
            self.channels[websocket] = ws_channel

        return ws_channel

    async def on_disconnect(self, websocket: WebSocketType):
    async def __aiter__(self):
        """
        Call close on the channel if it's not, and remove from channel list
        Generator for received messages
        """
        # We can not catch any errors here as websocket.recv is
        # the first to catch any disconnects and bubble it up
        # so the connection is garbage collected right away
        while not self.is_closed():
            yield await self.recv()

        :param websocket: The WebSocket objet attached to the Channel
        """
        with self._lock:
            channel = self.channels.get(websocket)
            if channel:
                logger.info(f"Disconnecting channel {channel}")
                if not channel.is_closed():
                    await channel.close()

                del self.channels[websocket]
@asynccontextmanager
async def create_channel(
    websocket: WebSocketType,
    **kwargs
) -> AsyncIterator[WebSocketChannel]:
    """
    Context manager for safely opening and closing a WebSocketChannel
    """
    channel = WebSocketChannel(websocket, **kwargs)
    try:
        await channel.accept()
        logger.info(f"Connected to channel - {channel}")

    async def disconnect_all(self):
        """
        Disconnect all Channels
        """
        with self._lock:
            for websocket in self.channels.copy().keys():
                await self.on_disconnect(websocket)

    async def broadcast(self, message: WSMessageSchemaType):
        """
        Broadcast a message on all Channels

        :param message: The message to send
        """
        with self._lock:
            for channel in self.channels.copy().values():
                if channel.subscribed_to(message.get('type')):
                    await self.send_direct(channel, message)

    async def send_direct(
            self, channel: WebSocketChannel, message: Union[WSMessageSchemaType, Dict[str, Any]]):
        """
        Send a message directly through direct_channel only

        :param direct_channel: The WebSocketChannel object to send the message through
        :param message: The message to send
        """
        if not await channel.send(message):
            await self.on_disconnect(channel.raw_websocket)

    def has_channels(self):
        """
        Flag for more than 0 channels
        """
        return len(self.channels) > 0
        yield channel
    finally:
        await channel.close()
        logger.info(f"Disconnected from channel - {channel}")

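The adaptive timeout in `_calc_send_limit` above clamps the per-message send limit to `min(max(2 * avg, 1), 3)`. A tiny worked example of that arithmetic:

# Worked example of the clamp in _calc_send_limit: twice the average
# send time, but never below 1s nor above 3s.
def send_limit(avg_send_time: float) -> float:
    return min(max(avg_send_time * 2, 1), 3)

assert send_limit(0.05) == 1    # fast channel: the 1s floor applies
assert send_limit(0.8) == 1.6   # mid-range: twice the average
assert send_limit(5.0) == 3     # slow channel: capped at 3s
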
31 freqtrade/rpc/api_server/ws/message_stream.py Normal file

@@ -0,0 +1,31 @@
import asyncio
import time


class MessageStream:
    """
    A message stream for consumers to subscribe to,
    and for producers to publish to.
    """
    def __init__(self):
        self._loop = asyncio.get_running_loop()
        self._waiter = self._loop.create_future()

    def publish(self, message):
        """
        Publish a message to this MessageStream

        :param message: The message to publish
        """
        waiter, self._waiter = self._waiter, self._loop.create_future()
        waiter.set_result((message, time.time(), self._waiter))

    async def __aiter__(self):
        """
        Iterate over the messages in the message stream
        """
        waiter = self._waiter
        while True:
            # Shield the future from being cancelled by a task waiting on it
            message, ts, waiter = await asyncio.shield(waiter)
            yield message, ts

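Each subscriber chains along the futures, so every consumer that is already iterating sees every subsequently published message, together with its publish timestamp. A minimal sketch, assuming the class above is importable from its new module path:

import asyncio
import time

from freqtrade.rpc.api_server.ws.message_stream import MessageStream


async def demo():
    stream = MessageStream()  # must be created inside a running event loop

    async def consumer(name: str):
        async for message, ts in stream:
            print(f"{name} got {message} ({time.time() - ts:.3f}s after publish)")
            break  # one message is enough for this demo

    tasks = [asyncio.create_task(consumer(n)) for n in ("a", "b")]
    await asyncio.sleep(0)             # let both consumers start waiting
    stream.publish({"type": "ping"})   # both consumers receive the same message
    await asyncio.gather(*tasks)


asyncio.run(demo())
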
@@ -1,5 +1,6 @@
import logging
from abc import ABC, abstractmethod
from typing import Any, Dict, Union

import orjson
import rapidjson

@@ -7,6 +8,7 @@ from pandas import DataFrame

from freqtrade.misc import dataframe_to_json, json_to_dataframe
from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy
from freqtrade.rpc.api_server.ws_schemas import WSMessageSchemaType


logger = logging.getLogger(__name__)

@@ -24,17 +26,13 @@ class WebSocketSerializer(ABC):
    def _deserialize(self, data):
        raise NotImplementedError()

    async def send(self, data: bytes):
    async def send(self, data: Union[WSMessageSchemaType, Dict[str, Any]]):
        await self._websocket.send(self._serialize(data))

    async def recv(self) -> bytes:
        data = await self._websocket.recv()

        return self._deserialize(data)

    async def close(self, code: int = 1000):
        await self._websocket.close(code)


class HybridJSONWebSocketSerializer(WebSocketSerializer):
    def _serialize(self, data) -> str:

@@ -47,7 +47,7 @@ class WSWhitelistRequest(WSRequestSchema):

class WSAnalyzedDFRequest(WSRequestSchema):
    type: RPCRequestType = RPCRequestType.ANALYZED_DF
    data: Dict[str, Any] = {"limit": 1500}
    data: Dict[str, Any] = {"limit": 1500, "pair": None}


# ------------------------------ MESSAGE SCHEMAS ----------------------------

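The new optional `pair` field lets a consumer re-request candles for a single pair. A sketch of building such a request and serializing it the way the consumer code below does; the exact string value of the `type` enum is my assumption.

# Sketch: a single-pair analyzed_df request.
from freqtrade.rpc.api_server.ws_schemas import WSAnalyzedDFRequest

request = WSAnalyzedDFRequest(data={"limit": 100, "pair": "BTC/USDT"})
payload = request.dict(exclude_none=True)  # what actually goes over the wire
# payload should look like (assuming the enum serializes to "analyzed_df"):
# {"type": "analyzed_df", "data": {"limit": 100, "pair": "BTC/USDT"}}
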
@@ -8,15 +8,17 @@ import asyncio
import logging
import socket
from threading import Thread
from typing import TYPE_CHECKING, Any, Callable, Dict, List, TypedDict
from typing import TYPE_CHECKING, Any, Callable, Dict, List, TypedDict, Union

import websockets
from pydantic import ValidationError

from freqtrade.constants import FULL_DATAFRAME_THRESHOLD
from freqtrade.data.dataprovider import DataProvider
from freqtrade.enums import RPCMessageType
from freqtrade.misc import remove_entry_exit_signals
from freqtrade.rpc.api_server.ws import WebSocketChannel
from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel
from freqtrade.rpc.api_server.ws.message_stream import MessageStream
from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSAnalyzedDFRequest,
                                                 WSMessageSchema, WSRequestSchema,
                                                 WSSubscribeRequest, WSWhitelistMessage,

@@ -38,6 +40,10 @@ class Producer(TypedDict):
logger = logging.getLogger(__name__)


def schema_to_dict(schema: Union[WSMessageSchema, WSRequestSchema]):
    return schema.dict(exclude_none=True)


class ExternalMessageConsumer:
    """
    The main controller class for consuming external messages from

@@ -92,6 +98,8 @@ class ExternalMessageConsumer:
            RPCMessageType.ANALYZED_DF: self._consume_analyzed_df_message,
        }

        self._channel_streams: Dict[str, MessageStream] = {}

        self.start()

    def start(self):

@@ -118,6 +126,8 @@ class ExternalMessageConsumer:
        logger.info("Stopping ExternalMessageConsumer")
        self._running = False

        self._channel_streams = {}

        if self._sub_tasks:
            # Cancel sub tasks
            for task in self._sub_tasks:

@@ -175,7 +185,6 @@ class ExternalMessageConsumer:
        :param producer: Dictionary containing producer info
        :param lock: An asyncio Lock
        """
        channel = None
        while self._running:
            try:
                host, port = producer['host'], producer['port']

@@ -190,19 +199,21 @@ class ExternalMessageConsumer:
                    max_size=self.message_size_limit,
                    ping_interval=None
                ) as ws:
                    channel = WebSocketChannel(ws, channel_id=name)
                    async with create_channel(
                        ws,
                        channel_id=name,
                        send_throttle=0.5
                    ) as channel:

                    logger.info(f"Producer connection success - {channel}")
                        # Create the message stream for this channel
                        self._channel_streams[name] = MessageStream()

                    # Now request the initial data from this Producer
                    for request in self._initial_requests:
                        await channel.send(
                            request.dict(exclude_none=True)
                        # Run the channel tasks while connected
                        await channel.run_channel_tasks(
                            self._receive_messages(channel, producer, lock),
                            self._send_requests(channel, self._channel_streams[name])
                        )

                    # Now receive data, if none is within the time limit, ping
                    await self._receive_messages(channel, producer, lock)

            except (websockets.exceptions.InvalidURI, ValueError) as e:
                logger.error(f"{ws_url} is an invalid WebSocket URL - {e}")
                break

@@ -229,11 +240,19 @@ class ExternalMessageConsumer:
                # An unforseen error has occurred, log and continue
                logger.error("Unexpected error has occurred:")
                logger.exception(e)
                await asyncio.sleep(self.sleep_time)
                continue

            finally:
                if channel:
                    await channel.close()
    async def _send_requests(self, channel: WebSocketChannel, channel_stream: MessageStream):
        # Send the initial requests
        for init_request in self._initial_requests:
            await channel.send(schema_to_dict(init_request))

        # Now send any subsequent requests published to
        # this channel's stream
        async for request, _ in channel_stream:
            logger.debug(f"Sending request to channel - {channel} - {request}")
            await channel.send(request)

    async def _receive_messages(
        self,

@@ -270,19 +289,31 @@ class ExternalMessageConsumer:
                    latency = (await asyncio.wait_for(pong, timeout=self.ping_timeout) * 1000)

                    logger.info(f"Connection to {channel} still alive, latency: {latency}ms")

                    continue
                except (websockets.exceptions.ConnectionClosed):
                    # Just eat the error and continue reconnecting
                    logger.warning(f"Disconnection in {channel} - retrying in {self.sleep_time}s")
                    await asyncio.sleep(self.sleep_time)
                    break

                except Exception as e:
                    # Just eat the error and continue reconnecting
                    logger.warning(f"Ping error {channel} - {e} - retrying in {self.sleep_time}s")
                    logger.debug(e, exc_info=e)
                    await asyncio.sleep(self.sleep_time)
                    raise

                    break
    def send_producer_request(
        self,
        producer_name: str,
        request: Union[WSRequestSchema, Dict[str, Any]]
    ):
        """
        Publish a message to the producer's message stream to be
        sent by the channel task.

        :param producer_name: The name of the producer to publish the message to
        :param request: The request to send to the producer
        """
        if isinstance(request, WSRequestSchema):
            request = schema_to_dict(request)

        if channel_stream := self._channel_streams.get(producer_name):
            channel_stream.publish(request)

    def handle_producer_message(self, producer: Producer, message: Dict[str, Any]):
        """

@@ -336,16 +367,45 @@ class ExternalMessageConsumer:

        pair, timeframe, candle_type = key

        if df.empty:
            logger.debug(f"Received Empty Dataframe for {key}")
            return

        # If set, remove the Entry and Exit signals from the Producer
        if self._emc_config.get('remove_entry_exit_signals', False):
            df = remove_entry_exit_signals(df)

        # Add the dataframe to the dataprovider
        self._dp._add_external_df(pair, df,
                                  last_analyzed=la,
                                  timeframe=timeframe,
                                  candle_type=candle_type,
                                  producer_name=producer_name)
        logger.debug(f"Received {len(df)} candle(s) for {key}")

        did_append, n_missing = self._dp._add_external_df(
            pair,
            df,
            last_analyzed=la,
            timeframe=timeframe,
            candle_type=candle_type,
            producer_name=producer_name
        )

        if not did_append:
            # We want an overlap in candles incase some data has changed
            n_missing += 1
            # Set to None for all candles if we missed a full df's worth of candles
            n_missing = n_missing if n_missing < FULL_DATAFRAME_THRESHOLD else 1500

            logger.warning(f"Holes in data or no existing df, requesting {n_missing} candles "
                           f"for {key} from `{producer_name}`")

            self.send_producer_request(
                producer_name,
                WSAnalyzedDFRequest(
                    data={
                        "limit": n_missing,
                        "pair": pair
                    }
                )
            )
            return

        logger.debug(
            f"Consumed message from `{producer_name}` of type `RPCMessageType.ANALYZED_DF`")
            f"Consumed message from `{producer_name}` "
            f"of type `RPCMessageType.ANALYZED_DF` for {key}")

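`send_producer_request` above decouples callers from the socket: they publish into the producer's `MessageStream`, and the `_send_requests` channel task forwards it. A hedged usage sketch; the producer name and helper are illustrative placeholders, not part of the diff.

from typing import TYPE_CHECKING

from freqtrade.rpc.api_server.ws_schemas import WSAnalyzedDFRequest

if TYPE_CHECKING:
    from freqtrade.rpc.external_message_consumer import ExternalMessageConsumer


def request_recent_candles(emc: "ExternalMessageConsumer", producer_name: str, pair: str):
    # Publishes into the producer's MessageStream; the _send_requests task
    # (see above) picks it up and sends it over the websocket.
    emc.send_producer_request(
        producer_name,
        WSAnalyzedDFRequest(data={"limit": 500, "pair": pair})
    )
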
@@ -167,6 +167,7 @@ class RPC:
        results = []
        for trade in trades:
            order: Optional[Order] = None
            current_profit_fiat: Optional[float] = None
            if trade.open_order_id:
                order = trade.select_order_by_order_id(trade.open_order_id)
            # calculate profit and send message to user

@@ -176,23 +177,26 @@ class RPC:
                        trade.pair, side='exit', is_short=trade.is_short, refresh=False)
                except (ExchangeError, PricingError):
                    current_rate = NAN
                if len(trade.select_filled_orders(trade.entry_side)) > 0:
                    current_profit = trade.calc_profit_ratio(
                        current_rate) if not isnan(current_rate) else NAN
                    current_profit_abs = trade.calc_profit(
                        current_rate) if not isnan(current_rate) else NAN
                else:
                    current_profit = current_profit_abs = current_profit_fiat = 0.0
            else:
                # Closed trade ...
                current_rate = trade.close_rate
                if len(trade.select_filled_orders(trade.entry_side)) > 0:
                    current_profit = trade.calc_profit_ratio(
                        current_rate) if not isnan(current_rate) else NAN
                    current_profit_abs = trade.calc_profit(
                        current_rate) if not isnan(current_rate) else NAN
                    current_profit_fiat: Optional[float] = None
                    # Calculate fiat profit
                    if self._fiat_converter:
                        current_profit_fiat = self._fiat_converter.convert_amount(
                            current_profit_abs,
                            self._freqtrade.config['stake_currency'],
                            self._freqtrade.config['fiat_display_currency']
                        )
                else:
                    current_profit = current_profit_abs = current_profit_fiat = 0.0
                current_profit = trade.close_profit
                current_profit_abs = trade.close_profit_abs

            # Calculate fiat profit
            if not isnan(current_profit_abs) and self._fiat_converter:
                current_profit_fiat = self._fiat_converter.convert_amount(
                    current_profit_abs,
                    self._freqtrade.config['stake_currency'],
                    self._freqtrade.config['fiat_display_currency']
                )

            # Calculate guaranteed profit (in case of trailing stop)
            stoploss_entry_dist = trade.calc_profit(trade.stop_loss)

@@ -740,6 +744,24 @@ class RPC:
        self._freqtrade.wallets.update()
        return {'result': f'Created sell order for trade {trade_id}.'}

    def _force_entry_validations(self, pair: str, order_side: SignalDirection):
        if not self._freqtrade.config.get('force_entry_enable', False):
            raise RPCException('Force_entry not enabled.')

        if self._freqtrade.state != State.RUNNING:
            raise RPCException('trader is not running')

        if order_side == SignalDirection.SHORT and self._freqtrade.trading_mode == TradingMode.SPOT:
            raise RPCException("Can't go short on Spot markets.")

        if pair not in self._freqtrade.exchange.get_markets(tradable_only=True):
            raise RPCException('Symbol does not exist or market is not active.')
        # Check if pair quote currency equals to the stake currency.
        stake_currency = self._freqtrade.config.get('stake_currency')
        if not self._freqtrade.exchange.get_pair_quote_currency(pair) == stake_currency:
            raise RPCException(
                f'Wrong pair selected. Only pairs with stake-currency {stake_currency} allowed.')

    def _rpc_force_entry(self, pair: str, price: Optional[float], *,
                         order_type: Optional[str] = None,
                         order_side: SignalDirection = SignalDirection.LONG,

@@ -750,21 +772,8 @@ class RPC:
        Handler for forcebuy <asset> <price>
        Buys a pair trade at the given or current price
        """
        self._force_entry_validations(pair, order_side)

        if not self._freqtrade.config.get('force_entry_enable', False):
            raise RPCException('Force_entry not enabled.')

        if self._freqtrade.state != State.RUNNING:
            raise RPCException('trader is not running')

        if order_side == SignalDirection.SHORT and self._freqtrade.trading_mode == TradingMode.SPOT:
            raise RPCException("Can't go short on Spot markets.")

        # Check if pair quote currency equals to the stake currency.
        stake_currency = self._freqtrade.config.get('stake_currency')
        if not self._freqtrade.exchange.get_pair_quote_currency(pair) == stake_currency:
            raise RPCException(
                f'Wrong pair selected. Only pairs with stake-currency {stake_currency} allowed.')
        # check if valid pair

        # check if pair already has an open pair

@@ -1053,15 +1062,26 @@ class RPC:
        return self._convert_dataframe_to_dict(self._freqtrade.config['strategy'],
                                               pair, timeframe, _data, last_analyzed)

    def __rpc_analysed_dataframe_raw(self, pair: str, timeframe: str,
                                     limit: Optional[int]) -> Tuple[DataFrame, datetime]:
        """ Get the dataframe and last analyze from the dataprovider """
    def __rpc_analysed_dataframe_raw(
        self,
        pair: str,
        timeframe: str,
        limit: Optional[int]
    ) -> Tuple[DataFrame, datetime]:
        """
        Get the dataframe and last analyze from the dataprovider

        :param pair: The pair to get
        :param timeframe: The timeframe of data to get
        :param limit: The amount of candles in the dataframe
        """
        _data, last_analyzed = self._freqtrade.dataprovider.get_analyzed_dataframe(
            pair, timeframe)
        _data = _data.copy()

        if limit:
            _data = _data.iloc[-limit:]

        return _data, last_analyzed

    def _ws_all_analysed_dataframes(

@@ -1069,7 +1089,16 @@ class RPC:
        pairlist: List[str],
        limit: Optional[int]
    ) -> Generator[Dict[str, Any], None, None]:
        """ Get the analysed dataframes of each pair in the pairlist """
        """
        Get the analysed dataframes of each pair in the pairlist.
        If specified, only return the most recent `limit` candles for
        each dataframe.

        :param pairlist: A list of pairs to get
        :param limit: If an integer, limits the size of dataframe
            If a list of string date times, only returns those candles
        :returns: A generator of dictionaries with the key, dataframe, and last analyzed timestamp
        """
        timeframe = self._freqtrade.config['timeframe']
        candle_type = self._freqtrade.config.get('candle_type_def', CandleType.SPOT)


@@ -1082,10 +1111,15 @@ class RPC:
                "la": last_analyzed
            }

    def _ws_request_analyzed_df(self, limit: Optional[int]):
    def _ws_request_analyzed_df(
        self,
        limit: Optional[int] = None,
        pair: Optional[str] = None
    ):
        """ Historical Analyzed Dataframes for WebSocket """
        whitelist = self._freqtrade.active_pair_whitelist
        return self._ws_all_analysed_dataframes(whitelist, limit)
        pairlist = [pair] if pair else self._freqtrade.active_pair_whitelist

        return self._ws_all_analysed_dataframes(pairlist, limit)

    def _ws_request_whitelist(self):
        """ Whitelist data for WebSocket """

@@ -6,7 +6,7 @@ from collections import deque
from typing import Any, Dict, List

from freqtrade.constants import Config
from freqtrade.enums import RPCMessageType
from freqtrade.enums import NO_ECHO_MESSAGES, RPCMessageType
from freqtrade.rpc import RPC, RPCHandler



@@ -67,7 +67,7 @@ class RPCManager:
            'status': 'stopping bot'
        }
        """
        if msg.get('type') not in (RPCMessageType.ANALYZED_DF, RPCMessageType.WHITELIST):
        if msg.get('type') not in NO_ECHO_MESSAGES:
            logger.info('Sending rpc message: %s', msg)
        if 'pair' in msg:
            msg.update({

@ -79,6 +79,8 @@ def authorized_only(command_handler: Callable[..., None]) -> Callable[..., Any]:
|
|||
)
|
||||
try:
|
||||
return command_handler(self, *args, **kwargs)
|
||||
except RPCException as e:
|
||||
self._send_msg(str(e))
|
||||
except BaseException:
|
||||
logger.exception('Exception occurred within Telegram module')
|
||||
|
||||
|
@@ -538,72 +540,67 @@ class Telegram(RPCHandler):
         handler for `/status` and `/status <id>`.
         """
-        try:
         # Check if there's at least one numerical ID provided.
         # If so, try to get only these trades.
         trade_ids = []
         if context.args and len(context.args) > 0:
             trade_ids = [int(i) for i in context.args if i.isnumeric()]

         results = self._rpc._rpc_trade_status(trade_ids=trade_ids)
         position_adjust = self._config.get('position_adjustment_enable', False)
         max_entries = self._config.get('max_entry_position_adjustment', -1)
         for r in results:
             r['open_date_hum'] = arrow.get(r['open_date']).humanize()
             r['num_entries'] = len([o for o in r['orders'] if o['ft_is_entry']])
             r['exit_reason'] = r.get('exit_reason', "")
             lines = [
                 "*Trade ID:* `{trade_id}`" +
                 (" `(since {open_date_hum})`" if r['is_open'] else ""),
                 "*Current Pair:* {pair}",
                 "*Direction:* " + ("`Short`" if r.get('is_short') else "`Long`"),
                 "*Leverage:* `{leverage}`" if r.get('leverage') else "",
                 "*Amount:* `{amount} ({stake_amount} {quote_currency})`",
                 "*Enter Tag:* `{enter_tag}`" if r['enter_tag'] else "",
                 "*Exit Reason:* `{exit_reason}`" if r['exit_reason'] else "",
             ]

             if position_adjust:
                 max_buy_str = (f"/{max_entries + 1}" if (max_entries > 0) else "")
                 lines.append("*Number of Entries:* `{num_entries}`" + max_buy_str)

             lines.extend([
                 "*Open Rate:* `{open_rate:.8f}`",
                 "*Close Rate:* `{close_rate:.8f}`" if r['close_rate'] else "",
                 "*Open Date:* `{open_date}`",
                 "*Close Date:* `{close_date}`" if r['close_date'] else "",
                 "*Current Rate:* `{current_rate:.8f}`" if r['is_open'] else "",
                 ("*Current Profit:* " if r['is_open'] else "*Close Profit: *")
                 + "`{profit_ratio:.2%}`",
             ])

             if r['is_open']:
                 if r.get('realized_profit'):
                     lines.append("*Realized Profit:* `{realized_profit:.8f}`")
                 if (r['stop_loss_abs'] != r['initial_stop_loss_abs']
                         and r['initial_stop_loss_ratio'] is not None):
                     # Adding initial stoploss only if it is different from stoploss
                     lines.append("*Initial Stoploss:* `{initial_stop_loss_abs:.8f}` "
                                  "`({initial_stop_loss_ratio:.2%})`")

                 # Adding stoploss and stoploss percentage only if it is not None
                 lines.append("*Stoploss:* `{stop_loss_abs:.8f}` " +
                              ("`({stop_loss_ratio:.2%})`" if r['stop_loss_ratio'] else ""))
                 lines.append("*Stoploss distance:* `{stoploss_current_dist:.8f}` "
                              "`({stoploss_current_dist_ratio:.2%})`")
                 if r['open_order']:
                     lines.append(
                         "*Open Order:* `{open_order}`"
                         + "- `{exit_order_status}`" if r['exit_order_status'] else "")

             lines_detail = self._prepare_order_details(
                 r['orders'], r['quote_currency'], r['is_open'])
             lines.extend(lines_detail if lines_detail else "")
             self.__send_status_msg(lines, r)
-        except RPCException as e:
-            self._send_msg(str(e))

     def __send_status_msg(self, lines: List[str], r: Dict[str, Any]) -> None:
         """
@@ -630,37 +627,34 @@ class Telegram(RPCHandler):
         :param update: message update
         :return: None
         """
-        try:
         fiat_currency = self._config.get('fiat_display_currency', '')
         statlist, head, fiat_profit_sum = self._rpc._rpc_status_table(
             self._config['stake_currency'], fiat_currency)

         show_total = not isnan(fiat_profit_sum) and len(statlist) > 1
         max_trades_per_msg = 50
         """
         Calculate the number of messages of 50 trades per message
         0.99 is used to make sure that there are no extra (empty) messages
         As an example with 50 trades, there will be int(50/50 + 0.99) = 1 message
         """
         messages_count = max(int(len(statlist) / max_trades_per_msg + 0.99), 1)
         for i in range(0, messages_count):
             trades = statlist[i * max_trades_per_msg:(i + 1) * max_trades_per_msg]
             if show_total and i == messages_count - 1:
                 # append total line
                 trades.append(["Total", "", "", f"{fiat_profit_sum:.2f} {fiat_currency}"])

             message = tabulate(trades,
                                headers=head,
                                tablefmt='simple')
             if show_total and i == messages_count - 1:
                 # insert separators line between Total
                 lines = message.split("\n")
                 message = "\n".join(lines[:-1] + [lines[1]] + [lines[-1]])
             self._send_msg(f"<pre>{message}</pre>", parse_mode=ParseMode.HTML,
                            reload_able=True, callback_path="update_status_table",
                            query=update.callback_query)
-        except RPCException as e:
-            self._send_msg(str(e))
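As an aside on the `0.99` trick documented in the block above, a quick standalone check (illustrative, not part of the diff) confirms it behaves like a plain ceiling division for these batch sizes:

import math

# int(n / 50 + 0.99) rounds any partial batch up, exactly like ceil.
for n in (1, 49, 50, 51, 100):
    assert max(int(n / 50 + 0.99), 1) == max(math.ceil(n / 50), 1)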
@@ -686,35 +680,32 @@ class Telegram(RPCHandler):
             timescale = int(context.args[0]) if context.args else val.default
         except (TypeError, ValueError, IndexError):
             timescale = val.default
-        try:
         stats = self._rpc._rpc_timeunit_profit(
             timescale,
             stake_cur,
             fiat_disp_cur,
             unit
         )
         stats_tab = tabulate(
             [[f"{period['date']} ({period['trade_count']})",
               f"{round_coin_value(period['abs_profit'], stats['stake_currency'])}",
               f"{period['fiat_value']:.2f} {stats['fiat_display_currency']}",
               f"{period['rel_profit']:.2%}",
               ] for period in stats['data']],
             headers=[
                 f"{val.header} (count)",
                 f'{stake_cur}',
                 f'{fiat_disp_cur}',
                 'Profit %',
                 'Trades',
             ],
             tablefmt='simple')
         message = (
             f'<b>{val.message} Profit over the last {timescale} {val.message2}</b>:\n'
             f'<pre>{stats_tab}</pre>'
         )
         self._send_msg(message, parse_mode=ParseMode.HTML, reload_able=True,
                        callback_path=val.callback, query=update.callback_query)
-        except RPCException as e:
-            self._send_msg(str(e))

     @authorized_only
     def _daily(self, update: Update, context: CallbackContext) -> None:
@@ -878,79 +869,76 @@ class Telegram(RPCHandler):
     @authorized_only
     def _balance(self, update: Update, context: CallbackContext) -> None:
         """ Handler for /balance """
-        try:
         result = self._rpc._rpc_balance(self._config['stake_currency'],
                                         self._config.get('fiat_display_currency', ''))

         balance_dust_level = self._config['telegram'].get('balance_dust_level', 0.0)
         if not balance_dust_level:
             balance_dust_level = DUST_PER_COIN.get(self._config['stake_currency'], 1.0)

         output = ''
         if self._config['dry_run']:
             output += "*Warning:* Simulated balances in Dry Mode.\n"
         starting_cap = round_coin_value(
             result['starting_capital'], self._config['stake_currency'])
         output += f"Starting capital: `{starting_cap}`"
         starting_cap_fiat = round_coin_value(
             result['starting_capital_fiat'], self._config['fiat_display_currency']
         ) if result['starting_capital_fiat'] > 0 else ''
         output += (f" `, {starting_cap_fiat}`.\n"
                    ) if result['starting_capital_fiat'] > 0 else '.\n'

         total_dust_balance = 0
         total_dust_currencies = 0
         for curr in result['currencies']:
             curr_output = ''
             if curr['est_stake'] > balance_dust_level:
                 if curr['is_position']:
                     curr_output = (
                         f"*{curr['currency']}:*\n"
                         f"\t`{curr['side']}: {curr['position']:.8f}`\n"
                         f"\t`Leverage: {curr['leverage']:.1f}`\n"
                         f"\t`Est. {curr['stake']}: "
                         f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n")
                 else:
                     curr_output = (
                         f"*{curr['currency']}:*\n"
                         f"\t`Available: {curr['free']:.8f}`\n"
                         f"\t`Balance: {curr['balance']:.8f}`\n"
                         f"\t`Pending: {curr['used']:.8f}`\n"
                         f"\t`Est. {curr['stake']}: "
                         f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n")
             elif curr['est_stake'] <= balance_dust_level:
                 total_dust_balance += curr['est_stake']
                 total_dust_currencies += 1

             # Handle overflowing message length
             if len(output + curr_output) >= MAX_MESSAGE_LENGTH:
                 self._send_msg(output)
                 output = curr_output
             else:
                 output += curr_output

         if total_dust_balance > 0:
             output += (
                 f"*{total_dust_currencies} Other "
                 f"{plural(total_dust_currencies, 'Currency', 'Currencies')} "
                 f"(< {balance_dust_level} {result['stake']}):*\n"
                 f"\t`Est. {result['stake']}: "
                 f"{round_coin_value(total_dust_balance, result['stake'], False)}`\n")
         tc = result['trade_count'] > 0
         stake_improve = f" `({result['starting_capital_ratio']:.2%})`" if tc else ''
         fiat_val = f" `({result['starting_capital_fiat_ratio']:.2%})`" if tc else ''

         output += ("\n*Estimated Value*:\n"
                    f"\t`{result['stake']}: "
                    f"{round_coin_value(result['total'], result['stake'], False)}`"
                    f"{stake_improve}\n"
                    f"\t`{result['symbol']}: "
                    f"{round_coin_value(result['value'], result['symbol'], False)}`"
                    f"{fiat_val}\n")
         self._send_msg(output, reload_able=True, callback_path="update_balance",
                        query=update.callback_query)
-        except RPCException as e:
-            self._send_msg(str(e))

     @authorized_only
     def _start(self, update: Update, context: CallbackContext) -> None:
@@ -1125,26 +1113,23 @@ class Telegram(RPCHandler):
             nrecent = int(context.args[0]) if context.args else 10
         except (TypeError, ValueError, IndexError):
             nrecent = 10
-        try:
         trades = self._rpc._rpc_trade_history(
             nrecent
         )
         trades_tab = tabulate(
             [[arrow.get(trade['close_date']).humanize(),
               trade['pair'] + " (#" + str(trade['trade_id']) + ")",
               f"{(trade['close_profit']):.2%} ({trade['close_profit_abs']})"]
              for trade in trades['trades']],
             headers=[
                 'Close Date',
                 'Pair (ID)',
                 f'Profit ({stake_cur})',
             ],
             tablefmt='simple')
         message = (f"<b>{min(trades['trades_count'], nrecent)} recent trades</b>:\n"
                    + (f"<pre>{trades_tab}</pre>" if trades['trades_count'] > 0 else ''))
         self._send_msg(message, parse_mode=ParseMode.HTML)
-        except RPCException as e:
-            self._send_msg(str(e))

     @authorized_only
     def _delete_trade(self, update: Update, context: CallbackContext) -> None:
@@ -1155,18 +1140,14 @@ class Telegram(RPCHandler):
         :param update: message update
         :return: None
         """
-        try:
         if not context.args or len(context.args) == 0:
             raise RPCException("Trade-id not set.")
         trade_id = int(context.args[0])
         msg = self._rpc._rpc_delete(trade_id)
         self._send_msg((
             f"`{msg['result_msg']}`\n"
             'Please make sure to take care of this asset on the exchange manually.'
         ))
-        except RPCException as e:
-            self._send_msg(str(e))

     @authorized_only
     def _performance(self, update: Update, context: CallbackContext) -> None:
@@ -1177,27 +1158,24 @@ class Telegram(RPCHandler):
         :param update: message update
         :return: None
         """
-        try:
         trades = self._rpc._rpc_performance()
         output = "<b>Performance:</b>\n"
         for i, trade in enumerate(trades):
             stat_line = (
                 f"{i+1}.\t <code>{trade['pair']}\t"
                 f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
                 f"({trade['profit_ratio']:.2%}) "
                 f"({trade['count']})</code>\n")

             if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
                 self._send_msg(output, parse_mode=ParseMode.HTML)
                 output = stat_line
             else:
                 output += stat_line

         self._send_msg(output, parse_mode=ParseMode.HTML,
                        reload_able=True, callback_path="update_performance",
                        query=update.callback_query)
-        except RPCException as e:
-            self._send_msg(str(e))

     @authorized_only
     def _enter_tag_performance(self, update: Update, context: CallbackContext) -> None:
@@ -1208,31 +1186,28 @@ class Telegram(RPCHandler):
         :param update: message update
         :return: None
         """
-        try:
         pair = None
         if context.args and isinstance(context.args[0], str):
             pair = context.args[0]

         trades = self._rpc._rpc_enter_tag_performance(pair)
         output = "<b>Entry Tag Performance:</b>\n"
         for i, trade in enumerate(trades):
             stat_line = (
                 f"{i+1}.\t <code>{trade['enter_tag']}\t"
                 f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
                 f"({trade['profit_ratio']:.2%}) "
                 f"({trade['count']})</code>\n")

             if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
                 self._send_msg(output, parse_mode=ParseMode.HTML)
                 output = stat_line
             else:
                 output += stat_line

         self._send_msg(output, parse_mode=ParseMode.HTML,
                        reload_able=True, callback_path="update_enter_tag_performance",
                        query=update.callback_query)
-        except RPCException as e:
-            self._send_msg(str(e))

     @authorized_only
     def _exit_reason_performance(self, update: Update, context: CallbackContext) -> None:
@@ -1243,31 +1218,28 @@ class Telegram(RPCHandler):
         :param update: message update
         :return: None
         """
-        try:
         pair = None
         if context.args and isinstance(context.args[0], str):
             pair = context.args[0]

         trades = self._rpc._rpc_exit_reason_performance(pair)
         output = "<b>Exit Reason Performance:</b>\n"
         for i, trade in enumerate(trades):
             stat_line = (
                 f"{i+1}.\t <code>{trade['exit_reason']}\t"
                 f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
                 f"({trade['profit_ratio']:.2%}) "
                 f"({trade['count']})</code>\n")

             if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
                 self._send_msg(output, parse_mode=ParseMode.HTML)
                 output = stat_line
             else:
                 output += stat_line

         self._send_msg(output, parse_mode=ParseMode.HTML,
                        reload_able=True, callback_path="update_exit_reason_performance",
                        query=update.callback_query)
-        except RPCException as e:
-            self._send_msg(str(e))

     @authorized_only
     def _mix_tag_performance(self, update: Update, context: CallbackContext) -> None:
@@ -1278,31 +1250,28 @@ class Telegram(RPCHandler):
         :param update: message update
         :return: None
         """
-        try:
         pair = None
         if context.args and isinstance(context.args[0], str):
             pair = context.args[0]

         trades = self._rpc._rpc_mix_tag_performance(pair)
         output = "<b>Mix Tag Performance:</b>\n"
         for i, trade in enumerate(trades):
             stat_line = (
                 f"{i+1}.\t <code>{trade['mix_tag']}\t"
                 f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
                 f"({trade['profit']:.2%}) "
                 f"({trade['count']})</code>\n")

             if len(output + stat_line) >= MAX_MESSAGE_LENGTH:
                 self._send_msg(output, parse_mode=ParseMode.HTML)
                 output = stat_line
             else:
                 output += stat_line

         self._send_msg(output, parse_mode=ParseMode.HTML,
                        reload_able=True, callback_path="update_mix_tag_performance",
                        query=update.callback_query)
-        except RPCException as e:
-            self._send_msg(str(e))

     @authorized_only
     def _count(self, update: Update, context: CallbackContext) -> None:
@@ -1313,18 +1282,15 @@ class Telegram(RPCHandler):
         :param update: message update
         :return: None
         """
-        try:
         counts = self._rpc._rpc_count()
         message = tabulate({k: [v] for k, v in counts.items()},
                            headers=['current', 'max', 'total stake'],
                            tablefmt='simple')
         message = "<pre>{}</pre>".format(message)
         logger.debug(message)
         self._send_msg(message, parse_mode=ParseMode.HTML,
                        reload_able=True, callback_path="update_count",
                        query=update.callback_query)
-        except RPCException as e:
-            self._send_msg(str(e))

     @authorized_only
     def _locks(self, update: Update, context: CallbackContext) -> None:
@@ -1372,22 +1338,19 @@ class Telegram(RPCHandler):
         Handler for /whitelist
         Shows the currently active whitelist
         """
-        try:
         whitelist = self._rpc._rpc_whitelist()

         if context.args:
             if "sorted" in context.args:
                 whitelist['whitelist'] = sorted(whitelist['whitelist'])
             if "baseonly" in context.args:
                 whitelist['whitelist'] = [pair.split("/")[0] for pair in whitelist['whitelist']]

         message = f"Using whitelist `{whitelist['method']}` with {whitelist['length']} pairs\n"
         message += f"`{', '.join(whitelist['whitelist'])}`"

         logger.debug(message)
         self._send_msg(message)
-        except RPCException as e:
-            self._send_msg(str(e))

     @authorized_only
     def _blacklist(self, update: Update, context: CallbackContext) -> None:
@@ -1425,30 +1388,27 @@ class Telegram(RPCHandler):
         Shows the latest logs
         """
-        try:
         try:
             limit = int(context.args[0]) if context.args else 10
         except (TypeError, ValueError, IndexError):
             limit = 10
         logs = RPC._rpc_get_logs(limit)['logs']
         msgs = ''
         msg_template = "*{}* {}: {} \\- `{}`"
         for logrec in logs:
             msg = msg_template.format(escape_markdown(logrec[0], version=2),
                                       escape_markdown(logrec[2], version=2),
                                       escape_markdown(logrec[3], version=2),
                                       escape_markdown(logrec[4], version=2))
             if len(msgs + msg) + 10 >= MAX_MESSAGE_LENGTH:
                 # Send message immediately if it would become too long
                 self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2)
                 msgs = msg + '\n'
             else:
                 # Append message to messages to send
                 msgs += msg + '\n'

         if msgs:
             self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2)
-        except RPCException as e:
-            self._send_msg(str(e))

     @authorized_only
     def _edge(self, update: Update, context: CallbackContext) -> None:
@@ -1456,21 +1416,17 @@ class Telegram(RPCHandler):
         Handler for /edge
         Shows information related to Edge
         """
-        try:
         edge_pairs = self._rpc._rpc_edge()
         if not edge_pairs:
             message = '<b>Edge only validated following pairs:</b>'
             self._send_msg(message, parse_mode=ParseMode.HTML)

         for chunk in chunks(edge_pairs, 25):
             edge_pairs_tab = tabulate(chunk, headers='keys', tablefmt='simple')
             message = (f'<b>Edge only validated following pairs:</b>\n'
                        f'<pre>{edge_pairs_tab}</pre>')

             self._send_msg(message, parse_mode=ParseMode.HTML)
-
-        except RPCException as e:
-            self._send_msg(str(e))

     @authorized_only
     def _help(self, update: Update, context: CallbackContext) -> None:
@@ -1551,12 +1507,9 @@ class Telegram(RPCHandler):
         Handler for /health
         Shows the last process timestamp
         """
-        try:
         health = self._rpc._health()
         message = f"Last process: `{health['last_process_loc']}`"
         self._send_msg(message)
-        except RPCException as e:
-            self._send_msg(str(e))

     @authorized_only
     def _version(self, update: Update, context: CallbackContext) -> None:
@@ -68,6 +68,7 @@ class Webhook(RPCHandler):
                                RPCMessageType.PROTECTION_TRIGGER_GLOBAL,
                                RPCMessageType.WHITELIST,
                                RPCMessageType.ANALYZED_DF,
+                               RPCMessageType.NEW_CANDLE,
                                RPCMessageType.STRATEGY_MSG):
             # Don't fail for non-implemented types
             return None
@@ -739,10 +739,10 @@ class IStrategy(ABC, HyperStrategyMixin):
         """
         pair = str(metadata.get('pair'))

+        new_candle = self._last_candle_seen_per_pair.get(pair, None) != dataframe.iloc[-1]['date']
         # Test if seen this pair and last candle before.
         # always run if process_only_new_candles is set to false
-        if (not self.process_only_new_candles or
-                self._last_candle_seen_per_pair.get(pair, None) != dataframe.iloc[-1]['date']):
+        if not self.process_only_new_candles or new_candle:

             # Defs that only make change on new candle data.
             dataframe = self.analyze_ticker(dataframe, metadata)
@@ -751,7 +751,7 @@ class IStrategy(ABC, HyperStrategyMixin):

             candle_type = self.config.get('candle_type_def', CandleType.SPOT)
             self.dp._set_cached_df(pair, self.timeframe, dataframe, candle_type=candle_type)
-            self.dp._emit_df((pair, self.timeframe, candle_type), dataframe)
+            self.dp._emit_df((pair, self.timeframe, candle_type), dataframe, new_candle)

         else:
             logger.debug("Skipping TA Analysis for already analyzed candle")
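The two hunks above compute the new-candle check once, keep the result in `new_candle`, and forward it to `_emit_df` so consumers can distinguish a fresh candle from a re-emitted dataframe. A self-contained sketch of the pattern with simplified names (illustrative, not freqtrade's actual API):

from typing import Dict, Tuple

_last_candle_seen: Dict[str, str] = {}  # pair -> date of last analyzed candle


def analyze_gate(pair: str, latest_date: str,
                 process_only_new_candles: bool = True) -> Tuple[bool, bool]:
    """Return (should_analyze, new_candle) for this pair and candle date."""
    new_candle = _last_candle_seen.get(pair) != latest_date
    should_analyze = not process_only_new_candles or new_candle
    if should_analyze:
        _last_candle_seen[pair] = latest_date
    return should_analyze, new_candle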
@@ -19,7 +19,7 @@ class FreqaiExampleHybridStrategy(IStrategy):

     Launching this strategy would be:

-    freqtrade trade --strategy FreqaiExampleHyridStrategy --strategy-path freqtrade/templates
+    freqtrade trade --strategy FreqaiExampleHybridStrategy --strategy-path freqtrade/templates
     --freqaimodel CatboostClassifier --config config_examples/config_freqai.example.json

     or the user simply adds this to their config:
@@ -86,7 +86,7 @@ class FreqaiExampleHybridStrategy(IStrategy):
     process_only_new_candles = True
     stoploss = -0.05
     use_exit_signal = True
-    startup_candle_count: int = 300
+    startup_candle_count: int = 30
     can_short = True

     # Hyperoptable parameters
@@ -7,14 +7,17 @@
     "# Strategy analysis example\n",
     "\n",
     "Debugging a strategy can be time-consuming. Freqtrade offers helper functions to visualize raw data.\n",
-    "The following assumes you work with SampleStrategy, data for 5m timeframe from Binance and have downloaded them into the data directory in the default location."
+    "The following assumes you work with SampleStrategy, data for 5m timeframe from Binance and have downloaded them into the data directory in the default location.\n",
+    "Please follow the [documentation](https://www.freqtrade.io/en/stable/data-download/) for more details."
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Setup"
+    "## Setup\n",
+    "\n",
+    "### Change Working directory to repository root"
    ]
   },
   {
@@ -23,7 +26,38 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "import os\n",
+    "from pathlib import Path\n",
+    "\n",
+    "# Change directory\n",
+    "# Modify this cell to ensure that the output shows the correct path.\n",
+    "# Define all paths relative to the project root shown in the cell output\n",
+    "project_root = \"somedir/freqtrade\"\n",
+    "i=0\n",
+    "try:\n",
+    "    os.chdir(project_root)\n",
+    "    assert Path('LICENSE').is_file()\n",
+    "except:\n",
+    "    while i<4 and (not Path('LICENSE').is_file()):\n",
+    "        os.chdir(Path(Path.cwd(), '../'))\n",
+    "        i+=1\n",
+    "    project_root = Path.cwd()\n",
+    "print(Path.cwd())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Configure Freqtrade environment"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "from freqtrade.configuration import Configuration\n",
     "\n",
     "# Customize these according to your needs.\n",
@@ -31,14 +65,14 @@
     "# Initialize empty configuration object\n",
     "config = Configuration.from_files([])\n",
     "# Optionally (recommended), use existing configuration file\n",
-    "# config = Configuration.from_files([\"config.json\"])\n",
+    "# config = Configuration.from_files([\"user_data/config.json\"])\n",
     "\n",
     "# Define some constants\n",
     "config[\"timeframe\"] = \"5m\"\n",
     "# Name of the strategy class\n",
     "config[\"strategy\"] = \"SampleStrategy\"\n",
     "# Location of the data\n",
-    "data_location = config['datadir']\n",
+    "data_location = config[\"datadir\"]\n",
     "# Pair to analyze - Only use one pair here\n",
     "pair = \"BTC/USDT\""
    ]
@@ -56,12 +90,12 @@
     "candles = load_pair_history(datadir=data_location,\n",
     "                            timeframe=config[\"timeframe\"],\n",
     "                            pair=pair,\n",
-    "                            data_format = \"hdf5\",\n",
+    "                            data_format = \"json\",  # Make sure to update this to your data\n",
     "                            candle_type=CandleType.SPOT,\n",
     "                            )\n",
     "\n",
     "# Confirm success\n",
-    "print(\"Loaded \" + str(len(candles)) + f\" rows of data for {pair} from {data_location}\")\n",
+    "print(f\"Loaded {len(candles)} rows of data for {pair} from {data_location}\")\n",
     "candles.head()"
    ]
   },
@@ -328,7 +362,7 @@
     "# Show graph inline\n",
     "# graph.show()\n",
     "\n",
-    "# Render graph in a seperate window\n",
+    "# Render graph in a separate window\n",
     "graph.show(renderer=\"browser\")\n"
    ]
   },
@@ -365,7 +399,7 @@
   "metadata": {
    "file_extension": ".py",
    "kernelspec": {
-    "display_name": "Python 3.9.7 64-bit ('trade_397')",
+    "display_name": "Python 3.9.7 64-bit",
     "language": "python",
     "name": "python3"
   },
freqtrade/util/gc_setup.py (new file, 18 lines)
@@ -0,0 +1,18 @@
+import gc
+import logging
+import platform
+
+
+logger = logging.getLogger(__name__)
+
+
+def gc_set_threshold():
+    """
+    Reduce number of GC runs to improve performance (explanation video)
+    https://www.youtube.com/watch?v=p4Sn6UcFTOU
+
+    """
+    if platform.python_implementation() == "CPython":
+        # allocs, g1, g2 = gc.get_threshold()
+        gc.set_threshold(50_000, 500, 1000)
+        logger.debug("Adjusting python allocations to reduce GC runs")
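An illustrative way to wire the new helper in at process startup (the `main` entry point below is hypothetical, not part of this diff):

import gc

from freqtrade.util.gc_setup import gc_set_threshold


def main() -> None:
    # Raise the generation-0 threshold once, before heavy allocation starts.
    gc_set_threshold()
    print(gc.get_threshold())  # (50000, 500, 1000) on CPython


if __name__ == "__main__":
    main()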
@@ -291,12 +291,17 @@ class Wallets:
         return self._check_available_stake_amount(stake_amount, available_amount)

     def validate_stake_amount(self, pair: str, stake_amount: Optional[float],
-                              min_stake_amount: Optional[float], max_stake_amount: float):
+                              min_stake_amount: Optional[float], max_stake_amount: float,
+                              trade_amount: Optional[float]):
         if not stake_amount:
             logger.debug(f"Stake amount is {stake_amount}, ignoring possible trade for {pair}.")
             return 0

         max_stake_amount = min(max_stake_amount, self.get_available_stake_amount())
+        if trade_amount:
+            # if in a trade, then the resulting trade size cannot go beyond the max stake
+            # Otherwise we could no longer exit.
+            max_stake_amount = min(max_stake_amount, max_stake_amount - trade_amount)

         if min_stake_amount is not None and min_stake_amount > max_stake_amount:
             if self._log:
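A quick numeric illustration of the new `trade_amount` cap (numbers invented for the example):

# An open position already worth 300 against a 1000 max stake leaves room
# for at most 700 more, so the combined position stays within the max and
# can still be exited.
max_stake_amount = 1000.0
trade_amount = 300.0
if trade_amount:
    max_stake_amount = min(max_stake_amount, max_stake_amount - trade_amount)
assert max_stake_amount == 700.0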
@@ -29,6 +29,7 @@ nav:
         - Parameter table: freqai-parameter-table.md
         - Feature engineering: freqai-feature-engineering.md
         - Running FreqAI: freqai-running.md
+        - Reinforcement Learning: freqai-reinforcement-learning.md
         - Developer guide: freqai-developers.md
     - Short / Leverage: leverage.md
     - Utility Sub-commands: utils.md

@@ -40,6 +41,7 @@ nav:
     - Backtest analysis: advanced-backtesting.md
     - Advanced Topics:
         - Advanced Post-installation Tasks: advanced-setup.md
+        - Trade Object: trade-object.md
         - Advanced Strategy: strategy-advanced.md
         - Advanced Hyperopt: advanced-hyperopt.md
         - Producer/Consumer mode: producer-consumer.md
@@ -3,30 +3,31 @@
 -r requirements-plot.txt
 -r requirements-hyperopt.txt
 -r requirements-freqai.txt
+-r requirements-freqai-rl.txt
 -r docs/requirements-docs.txt

 coveralls==3.3.1
-flake8==5.0.4
+flake8==6.0.0
 flake8-tidy-imports==4.8.0
 mypy==0.991
-pre-commit==2.20.0
+pre-commit==2.21.0
 pytest==7.2.0
-pytest-asyncio==0.20.2
+pytest-asyncio==0.20.3
 pytest-cov==4.0.0
 pytest-mock==3.10.0
-pytest-random-order==1.0.4
-isort==5.10.1
+pytest-random-order==1.1.0
+isort==5.11.4
 # For datetime mocking
 time-machine==2.8.2
 # fastapi testing
 httpx==0.23.1

 # Convert jupyter notebooks to markdown documents
-nbconvert==7.2.5
+nbconvert==7.2.7

 # mypy types
 types-cachetools==5.2.1
 types-filelock==3.2.7
-types-requests==2.28.11.5
+types-requests==2.28.11.7
 types-tabulate==0.9.0.0
-types-python-dateutil==2.8.19.4
+types-python-dateutil==2.8.19.5
requirements-freqai-rl.txt (new file, 9 lines)
@@ -0,0 +1,9 @@
+# Include all requirements to run the bot.
+-r requirements-freqai.txt
+
+# Required for freqai-rl
+torch==1.13.1
+stable-baselines3==1.6.2
+sb3-contrib==1.6.2
+# Gym is forced to this version by stable-baselines3.
+gym==0.21
@@ -7,5 +7,5 @@ scikit-learn==1.1.3
 joblib==1.2.0
 catboost==1.1.1; platform_machine != 'aarch64'
 lightgbm==3.3.3
-xgboost==1.7.1
+xgboost==1.7.2
 tensorboard==2.11.0
@@ -5,5 +5,5 @@
 scipy==1.9.3
 scikit-learn==1.1.3
 scikit-optimize==0.9.0
-filelock==3.8.0
+filelock==3.8.2
 progressbar2==4.2.0
@@ -1,28 +1,28 @@
-numpy==1.23.5
-pandas==1.5.1
+numpy==1.24.1
+pandas==1.5.2
 pandas-ta==0.3.14b

-ccxt==2.1.96
+ccxt==2.4.60
 # Pin cryptography for now due to rust build errors with piwheels
 cryptography==38.0.1; platform_machine == 'armv7l'
-cryptography==38.0.3; platform_machine != 'armv7l'
+cryptography==38.0.4; platform_machine != 'armv7l'
 aiohttp==3.8.3
-SQLAlchemy==1.4.44
-python-telegram-bot==13.14
+SQLAlchemy==1.4.45
+python-telegram-bot==13.15
 arrow==1.2.3
 cachetools==4.2.2
 requests==2.28.1
-urllib3==1.26.12
-jsonschema==4.17.0
+urllib3==1.26.13
+jsonschema==4.17.3
 TA-Lib==0.4.25
 technical==1.3.0
 tabulate==0.9.0
 pycoingecko==3.1.0
 jinja2==3.1.2
 tables==3.7.0
-blosc==1.10.6
+blosc==1.11.1
 joblib==1.2.0
-pyarrow==10.0.0; platform_machine != 'armv7l'
+pyarrow==10.0.1; platform_machine != 'armv7l'

 # find first, C search in arrays
 py_find_1st==1.1.5
@@ -30,13 +30,13 @@ py_find_1st==1.1.5
 # Load ticker files 30% faster
 python-rapidjson==1.9
 # Properly format api responses
-orjson==3.8.2
+orjson==3.8.3

 # Notify systemd
 sdnotify==0.3.2

 # API Server
-fastapi==0.87.0
+fastapi==0.88.0
 pydantic==1.10.2
 uvicorn==0.20.0
 pyjwt==2.6.0
@@ -47,7 +47,7 @@ psutil==5.9.4
 colorama==0.4.6
 # Building config files interactively
 questionary==1.10.0
-prompt-toolkit==3.0.32
+prompt-toolkit==3.0.36
 # Extensions to datetime library
 python-dateutil==2.8.2
setup.py
@@ -15,6 +15,14 @@ freqai = [
     'scikit-learn',
     'catboost; platform_machine != "aarch64"',
     'lightgbm',
     'xgboost'
 ]

+freqai_rl = [
+    'torch',
+    'stable-baselines3',
+    'gym==0.21',
+    'sb3-contrib'
+]
+
 develop = [

@@ -36,7 +44,7 @@ jupyter = [
     'nbconvert',
 ]

-all_extra = plot + develop + jupyter + hyperopt + freqai
+all_extra = plot + develop + jupyter + hyperopt + freqai + freqai_rl

 setup(
     tests_require=[

@@ -90,6 +98,7 @@ setup(
         'jupyter': jupyter,
         'hyperopt': hyperopt,
         'freqai': freqai,
+        'freqai_rl': freqai_rl,
         'all': all_extra,
     },
 )
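With the extra registered, `pip install -e ".[freqai_rl]"` pulls in the reinforcement-learning stack on its own, and `pip install -e ".[all]"` now includes it as well, since `all_extra` appends `freqai_rl`.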
Some files were not shown because too many files have changed in this diff.